          --- old/usr/src/uts/i86pc/vm/vm_machdep.c
          +++ new/usr/src/uts/i86pc/vm/vm_machdep.c
   1    1  /*
   2    2   * CDDL HEADER START
   3    3   *
   4    4   * The contents of this file are subject to the terms of the
   5    5   * Common Development and Distribution License (the "License").
   6    6   * You may not use this file except in compliance with the License.
   7    7   *
   8    8   * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
   9    9   * or http://www.opensolaris.org/os/licensing.
  10   10   * See the License for the specific language governing permissions
  11   11   * and limitations under the License.
  12   12   *
  13   13   * When distributing Covered Code, include this CDDL HEADER in each
  14   14   * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
  15   15   * If applicable, add the following below this CDDL HEADER, with the
  16   16   * fields enclosed by brackets "[]" replaced with your own identifying
  17   17   * information: Portions Copyright [yyyy] [name of copyright owner]
  18   18   *
  19   19   * CDDL HEADER END
  20   20   */
  21   21  /*
  22   22   * Copyright (c) 1992, 2010, Oracle and/or its affiliates. All rights reserved.
  23   23   */
  24   24  /*
  25   25   * Copyright (c) 2010, Intel Corporation.
  26   26   * All rights reserved.
  27   27   * Copyright (c) 2014, Joyent, Inc. All rights reserved.
  28   28   */
  29   29  
  30   30  /* Copyright (c) 1984, 1986, 1987, 1988, 1989 AT&T */
  31   31  /*      All Rights Reserved   */
  32   32  
  33   33  /*
  34   34   * Portions of this source code were derived from Berkeley 4.3 BSD
  35   35   * under license from the Regents of the University of California.
  36   36   */
  37   37  
  38   38  /*
  39   39   * UNIX machine dependent virtual memory support.
  40   40   */
  41   41  
  42   42  #include <sys/types.h>
  43   43  #include <sys/param.h>
  44   44  #include <sys/systm.h>
  45   45  #include <sys/user.h>
  46   46  #include <sys/proc.h>
  47   47  #include <sys/kmem.h>
  48   48  #include <sys/vmem.h>
  49   49  #include <sys/buf.h>
  50   50  #include <sys/cpuvar.h>
  51   51  #include <sys/lgrp.h>
  52   52  #include <sys/disp.h>
  53   53  #include <sys/vm.h>
  54   54  #include <sys/mman.h>
  55   55  #include <sys/vnode.h>
  56   56  #include <sys/cred.h>
  57   57  #include <sys/exec.h>
  58   58  #include <sys/exechdr.h>
  59   59  #include <sys/debug.h>
  60   60  #include <sys/vmsystm.h>
  61   61  #include <sys/swap.h>
  62   62  #include <sys/dumphdr.h>
  63   63  
  64   64  #include <vm/hat.h>
  65   65  #include <vm/as.h>
  66   66  #include <vm/seg.h>
  67   67  #include <vm/seg_kp.h>
  68   68  #include <vm/seg_vn.h>
  69   69  #include <vm/page.h>
  70   70  #include <vm/seg_kmem.h>
  71   71  #include <vm/seg_kpm.h>
  72   72  #include <vm/vm_dep.h>
  73   73  
  74   74  #include <sys/cpu.h>
  75   75  #include <sys/vm_machparam.h>
  76   76  #include <sys/memlist.h>
  77   77  #include <sys/bootconf.h> /* XXX the memlist stuff belongs in memlist_plat.h */
  78   78  #include <vm/hat_i86.h>
  79   79  #include <sys/x86_archext.h>
  80   80  #include <sys/elf_386.h>
  81   81  #include <sys/cmn_err.h>
  82   82  #include <sys/archsystm.h>
  83   83  #include <sys/machsystm.h>
  84   84  
  85   85  #include <sys/vtrace.h>
  86   86  #include <sys/ddidmareq.h>
  87   87  #include <sys/promif.h>
  88   88  #include <sys/memnode.h>
  89   89  #include <sys/stack.h>
  90   90  #include <util/qsort.h>
  91   91  #include <sys/taskq.h>
  92   92  
  93   93  #ifdef __xpv
  94   94  
  95   95  #include <sys/hypervisor.h>
  96   96  #include <sys/xen_mmu.h>
  97   97  #include <sys/balloon_impl.h>
  98   98  
  99   99  /*
  100  100   * domain 0 pages usable for DMA are kept pre-allocated in
 101  101   * distinct lists, ordered by increasing mfn.
 102  102   */
 103  103  static kmutex_t io_pool_lock;
 104  104  static kmutex_t contig_list_lock;
 105  105  static page_t *io_pool_4g;      /* pool for 32 bit dma limited devices */
 106  106  static page_t *io_pool_16m;     /* pool for 24 bit dma limited legacy devices */
 107  107  static long io_pool_cnt;
 108  108  static long io_pool_cnt_max = 0;
 109  109  #define DEFAULT_IO_POOL_MIN     128
 110  110  static long io_pool_cnt_min = DEFAULT_IO_POOL_MIN;
 111  111  static long io_pool_cnt_lowater = 0;
 112  112  static long io_pool_shrink_attempts; /* how many times did we try to shrink */
 113  113  static long io_pool_shrinks;    /* how many times did we really shrink */
 114  114  static long io_pool_grows;      /* how many times did we grow */
 115  115  static mfn_t start_mfn = 1;
  116  116  static caddr_t io_pool_kva;     /* used to alloc pages when needed */
 117  117  
 118  118  static int create_contig_pfnlist(uint_t);
 119  119  
 120  120  /*
 121  121   * percentage of phys mem to hold in the i/o pool
 122  122   */
 123  123  #define DEFAULT_IO_POOL_PCT     2
 124  124  static long io_pool_physmem_pct = DEFAULT_IO_POOL_PCT;
 125  125  static void page_io_pool_sub(page_t **, page_t *, page_t *);
 126  126  int ioalloc_dbg = 0;
 127  127  
 128  128  #endif /* __xpv */
 129  129  
 130  130  uint_t vac_colors = 1;
 131  131  
 132  132  int largepagesupport = 0;
 133  133  extern uint_t page_create_new;
 134  134  extern uint_t page_create_exists;
 135  135  extern uint_t page_create_putbacks;
 136  136  /*
 137  137   * Allow users to disable the kernel's use of SSE.
 138  138   */
 139  139  extern int use_sse_pagecopy, use_sse_pagezero;
 140  140  
 141  141  /*
 142  142   * combined memory ranges from mnode and memranges[] to manage single
 143  143   * mnode/mtype dimension in the page lists.
 144  144   */
 145  145  typedef struct {
 146  146          pfn_t   mnr_pfnlo;
 147  147          pfn_t   mnr_pfnhi;
 148  148          int     mnr_mnode;
 149  149          int     mnr_memrange;           /* index into memranges[] */
 150  150          int     mnr_next;               /* next lower PA mnoderange */
 151  151          int     mnr_exists;
 152  152          /* maintain page list stats */
 153  153          pgcnt_t mnr_mt_clpgcnt;         /* cache list cnt */
 154  154          pgcnt_t mnr_mt_flpgcnt[MMU_PAGE_SIZES]; /* free list cnt per szc */
 155  155          pgcnt_t mnr_mt_totcnt;          /* sum of cache and free lists */
 156  156  #ifdef DEBUG
 157  157          struct mnr_mts {                /* mnode/mtype szc stats */
 158  158                  pgcnt_t mnr_mts_pgcnt;
 159  159                  int     mnr_mts_colors;
 160  160                  pgcnt_t *mnr_mtsc_pgcnt;
 161  161          }       *mnr_mts;
 162  162  #endif
 163  163  } mnoderange_t;
 164  164  
 165  165  #define MEMRANGEHI(mtype)                                               \
 166  166          ((mtype > 0) ? memranges[mtype - 1] - 1: physmax)
 167  167  #define MEMRANGELO(mtype)       (memranges[mtype])
 168  168  
 169  169  #define MTYPE_FREEMEM(mt)       (mnoderanges[mt].mnr_mt_totcnt)
 170  170  
 171  171  /*
  172  172   * As the PC architecture evolved, memory was clumped into several
 173  173   * ranges for various historical I/O devices to do DMA.
 174  174   * < 16Meg - ISA bus
 175  175   * < 2Gig - ???
 176  176   * < 4Gig - PCI bus or drivers that don't understand PAE mode
 177  177   *
 178  178   * These are listed in reverse order, so that we can skip over unused
 179  179   * ranges on machines with small memories.
 180  180   *
 181  181   * For now under the Hypervisor, we'll only ever have one memrange.
 182  182   */
 183  183  #define PFN_4GIG        0x100000
 184  184  #define PFN_16MEG       0x1000
 185  185  /* Indices into the memory range (arch_memranges) array. */
 186  186  #define MRI_4G          0
 187  187  #define MRI_2G          1
 188  188  #define MRI_16M         2
 189  189  #define MRI_0           3
 190  190  static pfn_t arch_memranges[NUM_MEM_RANGES] = {
 191  191      PFN_4GIG,   /* pfn range for 4G and above */
 192  192      0x80000,    /* pfn range for 2G-4G */
 193  193      PFN_16MEG,  /* pfn range for 16M-2G */
 194  194      0x00000,    /* pfn range for 0-16M */
 195  195  };
 196  196  pfn_t *memranges = &arch_memranges[0];
 197  197  int nranges = NUM_MEM_RANGES;
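
            /*
             * Worked example (illustrative; the values follow directly from
             * arch_memranges[] and the MEMRANGELO/MEMRANGEHI macros above):
             * for mtype MRI_16M the covered pfn range is
             *
             *     [MEMRANGELO(MRI_16M), MEMRANGEHI(MRI_16M)]
             *         = [memranges[2], memranges[1] - 1]
             *         = [0x1000, 0x7ffff]            i.e. pfns for [16M, 2G)
             *
             * while for MRI_4G the upper bound is simply physmax, since there
             * is no higher range to supply a limit.
             */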
 198  198  
 199  199  /*
 200  200   * This combines mem_node_config and memranges into one data
 201  201   * structure to be used for page list management.
 202  202   */
 203  203  mnoderange_t    *mnoderanges;
 204  204  int             mnoderangecnt;
 205  205  int             mtype4g;
 206  206  int             mtype16m;
 207  207  int             mtypetop;       /* index of highest pfn'ed mnoderange */
 208  208  
 209  209  /*
 210  210   * 4g memory management variables for systems with more than 4g of memory:
 211  211   *
 212  212   * physical memory below 4g is required for 32bit dma devices and, currently,
 213  213   * for kmem memory. On systems with more than 4g of memory, the pool of memory
 214  214   * below 4g can be depleted without any paging activity given that there is
 215  215   * likely to be sufficient memory above 4g.
 216  216   *
 217  217   * physmax4g is set true if the largest pfn is over 4g. The rest of the
 218  218   * 4g memory management code is enabled only when physmax4g is true.
 219  219   *
 220  220   * maxmem4g is the count of the maximum number of pages on the page lists
  221  221   * with physical addresses below 4g. It can be a lot less than 4g given that
 222  222   * BIOS may reserve large chunks of space below 4g for hot plug pci devices,
 223  223   * agp aperture etc.
 224  224   *
 225  225   * freemem4g maintains the count of the number of available pages on the
 226  226   * page lists with physical addresses below 4g.
 227  227   *
 228  228   * DESFREE4G specifies the desired amount of below 4g memory. It defaults to
  229  229   * 6.25% (desfree4gshift = 4) of maxmem4g.
 230  230   *
 231  231   * RESTRICT4G_ALLOC returns true if freemem4g falls below DESFREE4G
  232  232   * and the amount of free memory above 4g is greater than freemem4g.
 233  233   * In this case, page_get_* routines will restrict below 4g allocations
 234  234   * for requests that don't specifically require it.
 235  235   */
 236  236  
 237  237  #define DESFREE4G       (maxmem4g >> desfree4gshift)
 238  238  
 239  239  #define RESTRICT4G_ALLOC                                        \
 240  240          (physmax4g && (freemem4g < DESFREE4G) && ((freemem4g << 1) < freemem))
 241  241  
 242  242  static pgcnt_t  maxmem4g;
 243  243  static pgcnt_t  freemem4g;
 244  244  static int      physmax4g;
 245  245  static int      desfree4gshift = 4;     /* maxmem4g shift to derive DESFREE4G */
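
            /*
             * Illustrative sketch of how RESTRICT4G_ALLOC is meant to be
             * consulted.  The real consumers are the page_get_* routines
             * mentioned above, which are outside this file, so the caller
             * shown here is hypothetical:
             *
             *     if (RESTRICT4G_ALLOC)
             *             steer this request to memory above 4g, unless
             *             the caller explicitly requires below-4g pages;
             *
             * i.e. below-4g pages are withheld once freemem4g drops under
             * DESFREE4G and less than half of freemem lives below 4g.
             */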
 246  246  
 247  247  /*
 248  248   * 16m memory management:
 249  249   *
 250  250   * reserve some amount of physical memory below 16m for legacy devices.
 251  251   *
  252  252   * RESTRICT16M_ALLOC returns true if there are sufficient free pages above
 253  253   * 16m or if the 16m pool drops below DESFREE16M.
 254  254   *
 255  255   * In this case, general page allocations via page_get_{free,cache}list
 256  256   * routines will be restricted from allocating from the 16m pool. Allocations
 257  257   * that require specific pfn ranges (page_get_anylist) and PG_PANIC allocations
 258  258   * are not restricted.
 259  259   */
 260  260  
 261  261  #define FREEMEM16M      MTYPE_FREEMEM(mtype16m)
 262  262  #define DESFREE16M      desfree16m
 263  263  #define RESTRICT16M_ALLOC(freemem, pgcnt, flags)                \
 264  264          ((freemem != 0) && ((flags & PG_PANIC) == 0) &&         \
 265  265              ((freemem >= (FREEMEM16M)) ||                       \
 266  266              (FREEMEM16M  < (DESFREE16M + pgcnt))))
 267  267  
 268  268  static pgcnt_t  desfree16m = 0x380;
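
            /*
             * Illustrative sketch of the intended use (hypothetical caller;
             * the real checks sit in the page_get_{free,cache}list paths
             * outside this excerpt):
             *
             *     if (RESTRICT16M_ALLOC(freemem, npages, flags))
             *             bypass the 16m pool and satisfy the request
             *             from memory above 16m;
             *
             * i.e. the pool is skipped whenever the request is not PG_PANIC
             * and either enough pages are free elsewhere or the pool would
             * drop below desfree16m plus the request size.
             */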
 269  269  
 270  270  /*
 271  271   * This can be patched via /etc/system to allow old non-PAE aware device
 272  272   * drivers to use kmem_alloc'd memory on 32 bit systems with > 4Gig RAM.
 273  273   */
 274  274  int restricted_kmemalloc = 0;
 275  275  
 276  276  #ifdef VM_STATS
 277  277  struct {
 278  278          ulong_t pga_alloc;
 279  279          ulong_t pga_notfullrange;
 280  280          ulong_t pga_nulldmaattr;
 281  281          ulong_t pga_allocok;
 282  282          ulong_t pga_allocfailed;
 283  283          ulong_t pgma_alloc;
 284  284          ulong_t pgma_allocok;
 285  285          ulong_t pgma_allocfailed;
 286  286          ulong_t pgma_allocempty;
 287  287  } pga_vmstats;
 288  288  #endif
 289  289  
 290  290  uint_t mmu_page_sizes;
 291  291  
 292  292  /* How many page sizes the users can see */
 293  293  uint_t mmu_exported_page_sizes;
 294  294  
 295  295  /* page sizes that legacy applications can see */
 296  296  uint_t mmu_legacy_page_sizes;
 297  297  
 298  298  /*
 299  299   * Number of pages in 1 GB.  Don't enable automatic large pages if we have
 300  300   * fewer than this many pages.
 301  301   */
 302  302  pgcnt_t shm_lpg_min_physmem = 1 << (30 - MMU_PAGESHIFT);
 303  303  pgcnt_t privm_lpg_min_physmem = 1 << (30 - MMU_PAGESHIFT);
 304  304  
 305  305  /*
 306  306   * Maximum and default segment size tunables for user private
 307  307   * and shared anon memory, and user text and initialized data.
 308  308   * These can be patched via /etc/system to allow large pages
 309  309   * to be used for mapping application private and shared anon memory.
 310  310   */
 311  311  size_t mcntl0_lpsize = MMU_PAGESIZE;
 312  312  size_t max_uheap_lpsize = MMU_PAGESIZE;
 313  313  size_t default_uheap_lpsize = MMU_PAGESIZE;
 314  314  size_t max_ustack_lpsize = MMU_PAGESIZE;
 315  315  size_t default_ustack_lpsize = MMU_PAGESIZE;
 316  316  size_t max_privmap_lpsize = MMU_PAGESIZE;
 317  317  size_t max_uidata_lpsize = MMU_PAGESIZE;
 318  318  size_t max_utext_lpsize = MMU_PAGESIZE;
 319  319  size_t max_shm_lpsize = MMU_PAGESIZE;
 320  320  
 321  321  
 322  322  /*
 323  323   * initialized by page_coloring_init().
 324  324   */
 325  325  uint_t  page_colors;
 326  326  uint_t  page_colors_mask;
 327  327  uint_t  page_coloring_shift;
 328  328  int     cpu_page_colors;
 329  329  static uint_t   l2_colors;
 330  330  
 331  331  /*
 332  332   * Page freelists and cachelists are dynamically allocated once mnoderangecnt
 333  333   * and page_colors are calculated from the l2 cache n-way set size.  Within a
 334  334   * mnode range, the page freelist and cachelist are hashed into bins based on
 335  335   * color. This makes it easier to search for a page within a specific memory
 336  336   * range.
 337  337   */
 338  338  #define PAGE_COLORS_MIN 16
 339  339  
 340  340  page_t ****page_freelists;
 341  341  page_t ***page_cachelists;
 342  342  
 343  343  
 344  344  /*
 345  345   * Used by page layer to know about page sizes
 346  346   */
 347  347  hw_pagesize_t hw_page_array[MAX_NUM_LEVEL + 1];
 348  348  
 349  349  kmutex_t        *fpc_mutex[NPC_MUTEX];
 350  350  kmutex_t        *cpc_mutex[NPC_MUTEX];
 351  351  
 352  352  /* Lock to protect mnoderanges array for memory DR operations. */
 353  353  static kmutex_t mnoderange_lock;
 354  354  
 355  355  /*
 356  356   * Only let one thread at a time try to coalesce large pages, to
 357  357   * prevent them from working against each other.
 358  358   */
 359  359  static kmutex_t contig_lock;
 360  360  #define CONTIG_LOCK()   mutex_enter(&contig_lock);
 361  361  #define CONTIG_UNLOCK() mutex_exit(&contig_lock);
 362  362  
 363  363  #define PFN_16M         (mmu_btop((uint64_t)0x1000000))
 364  364  
 365  365  /*
 366  366   * Return the optimum page size for a given mapping
 367  367   */
 368  368  /*ARGSUSED*/
 369  369  size_t
 370  370  map_pgsz(int maptype, struct proc *p, caddr_t addr, size_t len, int memcntl)
 371  371  {
 372  372          level_t l = 0;
 373  373          size_t pgsz = MMU_PAGESIZE;
 374  374          size_t max_lpsize;
 375  375          uint_t mszc;
 376  376  
 377  377          ASSERT(maptype != MAPPGSZ_VA);
 378  378  
 379  379          if (maptype != MAPPGSZ_ISM && physmem < privm_lpg_min_physmem) {
 380  380                  return (MMU_PAGESIZE);
 381  381          }
 382  382  
 383  383          switch (maptype) {
 384  384          case MAPPGSZ_HEAP:
 385  385          case MAPPGSZ_STK:
 386  386                  max_lpsize = memcntl ? mcntl0_lpsize : (maptype ==
 387  387                      MAPPGSZ_HEAP ? max_uheap_lpsize : max_ustack_lpsize);
 388  388                  if (max_lpsize == MMU_PAGESIZE) {
 389  389                          return (MMU_PAGESIZE);
 390  390                  }
 391  391                  if (len == 0) {
 392  392                          len = (maptype == MAPPGSZ_HEAP) ? p->p_brkbase +
 393  393                              p->p_brksize - p->p_bssbase : p->p_stksize;
 394  394                  }
 395  395                  len = (maptype == MAPPGSZ_HEAP) ? MAX(len,
 396  396                      default_uheap_lpsize) : MAX(len, default_ustack_lpsize);
 397  397  
 398  398                  /*
  399  399                   * use the page size that best fits len
 400  400                   */
 401  401                  for (l = mmu.umax_page_level; l > 0; --l) {
 402  402                          if (LEVEL_SIZE(l) > max_lpsize || len < LEVEL_SIZE(l)) {
 403  403                                  continue;
 404  404                          } else {
 405  405                                  pgsz = LEVEL_SIZE(l);
 406  406                          }
 407  407                          break;
 408  408                  }
 409  409  
 410  410                  mszc = (maptype == MAPPGSZ_HEAP ? p->p_brkpageszc :
 411  411                      p->p_stkpageszc);
 412  412                  if (addr == 0 && (pgsz < hw_page_array[mszc].hp_size)) {
 413  413                          pgsz = hw_page_array[mszc].hp_size;
 414  414                  }
 415  415                  return (pgsz);
 416  416  
 417  417          case MAPPGSZ_ISM:
 418  418                  for (l = mmu.umax_page_level; l > 0; --l) {
 419  419                          if (len >= LEVEL_SIZE(l))
 420  420                                  return (LEVEL_SIZE(l));
 421  421                  }
 422  422                  return (LEVEL_SIZE(0));
 423  423          }
 424  424          return (pgsz);
 425  425  }
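
            /*
             * Worked example for the heap/stack case above, assuming the
             * usual amd64 level sizes of 4K (level 0), 2M (level 1) and
             * 1G (level 2): with max_lpsize = 2M and an effective len of
             * 3M, level 2 is skipped (1G > max_lpsize), level 1 fits
             * (2M <= max_lpsize and len >= 2M), so pgsz = 2M is returned
             * unless the process already has a larger p_brkpageszc /
             * p_stkpageszc in effect.
             */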
 426  426  
 427  427  static uint_t
 428  428  map_szcvec(caddr_t addr, size_t size, uintptr_t off, size_t max_lpsize,
 429  429      size_t min_physmem)
 430  430  {
 431  431          caddr_t eaddr = addr + size;
 432  432          uint_t szcvec = 0;
 433  433          caddr_t raddr;
 434  434          caddr_t readdr;
 435  435          size_t  pgsz;
 436  436          int i;
 437  437  
 438  438          if (physmem < min_physmem || max_lpsize <= MMU_PAGESIZE) {
 439  439                  return (0);
 440  440          }
 441  441  
 442  442          for (i = mmu_exported_page_sizes - 1; i > 0; i--) {
 443  443                  pgsz = page_get_pagesize(i);
 444  444                  if (pgsz > max_lpsize) {
 445  445                          continue;
 446  446                  }
 447  447                  raddr = (caddr_t)P2ROUNDUP((uintptr_t)addr, pgsz);
 448  448                  readdr = (caddr_t)P2ALIGN((uintptr_t)eaddr, pgsz);
 449  449                  if (raddr < addr || raddr >= readdr) {
 450  450                          continue;
 451  451                  }
 452  452                  if (P2PHASE((uintptr_t)addr ^ off, pgsz)) {
 453  453                          continue;
 454  454                  }
 455  455                  /*
 456  456                   * Set szcvec to the remaining page sizes.
 457  457                   */
 458  458                  szcvec = ((1 << (i + 1)) - 1) & ~1;
 459  459                  break;
 460  460          }
 461  461          return (szcvec);
 462  462  }
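
            /*
             * Worked example of the szcvec computation above, assuming the
             * exported page sizes are 4K, 2M and 1G (size codes 0, 1, 2):
             * if the 1G size is the largest that fits (i == 2), then
             * szcvec = ((1 << 3) - 1) & ~1 = 0x6, i.e. size codes 1 and 2
             * with the base page bit cleared; if only 2M fits (i == 1),
             * szcvec = 0x2.
             */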
 463  463  
 464  464  /*
 465  465   * Return a bit vector of large page size codes that
 466  466   * can be used to map [addr, addr + len) region.
 467  467   */
 468  468  /*ARGSUSED*/
 469  469  uint_t
 470  470  map_pgszcvec(caddr_t addr, size_t size, uintptr_t off, int flags, int type,
 471  471      int memcntl)
 472  472  {
 473  473          size_t max_lpsize = mcntl0_lpsize;
 474  474  
 475  475          if (mmu.max_page_level == 0)
 476  476                  return (0);
 477  477  
 478  478          if (flags & MAP_TEXT) {
 479  479                  if (!memcntl)
 480  480                          max_lpsize = max_utext_lpsize;
 481  481                  return (map_szcvec(addr, size, off, max_lpsize,
 482  482                      shm_lpg_min_physmem));
 483  483  
 484  484          } else if (flags & MAP_INITDATA) {
 485  485                  if (!memcntl)
 486  486                          max_lpsize = max_uidata_lpsize;
 487  487                  return (map_szcvec(addr, size, off, max_lpsize,
 488  488                      privm_lpg_min_physmem));
 489  489  
 490  490          } else if (type == MAPPGSZC_SHM) {
 491  491                  if (!memcntl)
 492  492                          max_lpsize = max_shm_lpsize;
 493  493                  return (map_szcvec(addr, size, off, max_lpsize,
 494  494                      shm_lpg_min_physmem));
 495  495  
 496  496          } else if (type == MAPPGSZC_HEAP) {
 497  497                  if (!memcntl)
 498  498                          max_lpsize = max_uheap_lpsize;
 499  499                  return (map_szcvec(addr, size, off, max_lpsize,
 500  500                      privm_lpg_min_physmem));
 501  501  
 502  502          } else if (type == MAPPGSZC_STACK) {
 503  503                  if (!memcntl)
 504  504                          max_lpsize = max_ustack_lpsize;
 505  505                  return (map_szcvec(addr, size, off, max_lpsize,
 506  506                      privm_lpg_min_physmem));
 507  507  
 508  508          } else {
 509  509                  if (!memcntl)
 510  510                          max_lpsize = max_privmap_lpsize;
 511  511                  return (map_szcvec(addr, size, off, max_lpsize,
 512  512                      privm_lpg_min_physmem));
 513  513          }
 514  514  }
 515  515  
 516  516  /*
 517  517   * Handle a pagefault.
 518  518   */
 519  519  faultcode_t
 520  520  pagefault(
 521  521          caddr_t addr,
 522  522          enum fault_type type,
 523  523          enum seg_rw rw,
 524  524          int iskernel)
 525  525  {
 526  526          struct as *as;
 527  527          struct hat *hat;
 528  528          struct proc *p;
 529  529          kthread_t *t;
 530  530          faultcode_t res;
 531  531          caddr_t base;
 532  532          size_t len;
 533  533          int err;
 534  534          int mapped_red;
 535  535          uintptr_t ea;
 536  536  
 537  537          ASSERT_STACK_ALIGNED();
 538  538  
 539  539          if (INVALID_VADDR(addr))
 540  540                  return (FC_NOMAP);
 541  541  
 542  542          mapped_red = segkp_map_red();
 543  543  
 544  544          if (iskernel) {
 545  545                  as = &kas;
 546  546                  hat = as->a_hat;
 547  547          } else {
 548  548                  t = curthread;
 549  549                  p = ttoproc(t);
 550  550                  as = p->p_as;
 551  551                  hat = as->a_hat;
 552  552          }
 553  553  
 554  554          /*
 555  555           * Dispatch pagefault.
 556  556           */
 557  557          res = as_fault(hat, as, addr, 1, type, rw);
 558  558  
 559  559          /*
 560  560           * If this isn't a potential unmapped hole in the user's
 561  561           * UNIX data or stack segments, just return status info.
 562  562           */
 563  563          if (res != FC_NOMAP || iskernel)
 564  564                  goto out;
 565  565  
 566  566          /*
  567  567           * Check to see if we happened to fault on a currently unmapped
 568  568           * part of the UNIX data or stack segments.  If so, create a zfod
 569  569           * mapping there and then try calling the fault routine again.
 570  570           */
 571  571          base = p->p_brkbase;
 572  572          len = p->p_brksize;
 573  573  
 574  574          if (addr < base || addr >= base + len) {                /* data seg? */
 575  575                  base = (caddr_t)p->p_usrstack - p->p_stksize;
 576  576                  len = p->p_stksize;
 577  577                  if (addr < base || addr >= p->p_usrstack) {     /* stack seg? */
 578  578                          /* not in either UNIX data or stack segments */
 579  579                          res = FC_NOMAP;
 580  580                          goto out;
 581  581                  }
 582  582          }
 583  583  
 584  584          /*
  585  585           * The rest of this function implements 3.X/4.X/5.X compatibility.
  586  586           * This code is probably not needed anymore.
 587  587           */
 588  588          if (p->p_model == DATAMODEL_ILP32) {
 589  589  
 590  590                  /* expand the gap to the page boundaries on each side */
 591  591                  ea = P2ROUNDUP((uintptr_t)base + len, MMU_PAGESIZE);
 592  592                  base = (caddr_t)P2ALIGN((uintptr_t)base, MMU_PAGESIZE);
 593  593                  len = ea - (uintptr_t)base;
 594  594  
 595  595                  as_rangelock(as);
 596  596                  if (as_gap(as, MMU_PAGESIZE, &base, &len, AH_CONTAIN, addr) ==
 597  597                      0) {
 598  598                          err = as_map(as, base, len, segvn_create, zfod_argsp);
 599  599                          as_rangeunlock(as);
 600  600                          if (err) {
 601  601                                  res = FC_MAKE_ERR(err);
 602  602                                  goto out;
 603  603                          }
 604  604                  } else {
 605  605                          /*
 606  606                           * This page is already mapped by another thread after
 607  607                           * we returned from as_fault() above.  We just fall
  608  608                           * through to as_fault() below.
 609  609                           */
 610  610                          as_rangeunlock(as);
 611  611                  }
 612  612  
 613  613                  res = as_fault(hat, as, addr, 1, F_INVAL, rw);
 614  614          }
 615  615  
 616  616  out:
 617  617          if (mapped_red)
 618  618                  segkp_unmap_red();
 619  619  
 620  620          return (res);
 621  621  }
 622  622  
 623  623  void
 624  624  map_addr(caddr_t *addrp, size_t len, offset_t off, int vacalign, uint_t flags)
 625  625  {
 626  626          struct proc *p = curproc;
 627  627          map_addr_proc(addrp, len, off, vacalign,
 628  628              map_userlimit(p, p->p_as, flags), curproc, flags);
 629  629  }
 630  630  
 631  631  /*ARGSUSED*/
 632  632  int
 633  633  map_addr_vacalign_check(caddr_t addr, u_offset_t off)
 634  634  {
 635  635          return (0);
 636  636  }
 637  637  
 638  638  /*
 639  639   * map_addr_proc() is the routine called when the system is to
 640  640   * choose an address for the user.  We will pick an address
 641  641   * range which is the highest available below userlimit.
 642  642   *
 643  643   * Every mapping will have a redzone of a single page on either side of
 644  644   * the request. This is done to leave one page unmapped between segments.
 645  645   * This is not required, but it's useful for the user because if their
 646  646   * program strays across a segment boundary, it will catch a fault
 647  647   * immediately making debugging a little easier.  Currently the redzone
 648  648   * is mandatory.
 649  649   *
 650  650   * addrp is a value/result parameter.
 651  651   *      On input it is a hint from the user to be used in a completely
 652  652   *      machine dependent fashion.  We decide to completely ignore this hint.
 653  653   *      If MAP_ALIGN was specified, addrp contains the minimal alignment, which
 654  654   *      must be some "power of two" multiple of pagesize.
 655  655   *
 656  656   *      On output it is NULL if no address can be found in the current
  657  657   *      process's address space or else an address that is currently
 658  658   *      not mapped for len bytes with a page of red zone on either side.
 659  659   *
  660  660   *      vacalign is not needed on x86 (it's for virtually addressed caches)
 661  661   */
 662  662  /*ARGSUSED*/
 663  663  void
 664  664  map_addr_proc(
 665  665          caddr_t *addrp,
 666  666          size_t len,
 667  667          offset_t off,
 668  668          int vacalign,
 669  669          caddr_t userlimit,
 670  670          struct proc *p,
 671  671          uint_t flags)
 672  672  {
 673  673          struct as *as = p->p_as;
 674  674          caddr_t addr;
 675  675          caddr_t base;
 676  676          size_t slen;
 677  677          size_t align_amount;
 678  678  
 679  679          ASSERT32(userlimit == as->a_userlimit);
 680  680  
 681  681          base = p->p_brkbase;
 682  682  #if defined(__amd64)
 683  683          /*
 684  684           * XX64 Yes, this needs more work.
 685  685           */
 686  686          if (p->p_model == DATAMODEL_NATIVE) {
 687  687                  if (userlimit < as->a_userlimit) {
 688  688                          /*
 689  689                           * This happens when a program wants to map
 690  690                           * something in a range that's accessible to a
 691  691                           * program in a smaller address space.  For example,
 692  692                           * a 64-bit program calling mmap32(2) to guarantee
 693  693                           * that the returned address is below 4Gbytes.
 694  694                           */
 695  695                          ASSERT((uintptr_t)userlimit < ADDRESS_C(0xffffffff));
 696  696  
 697  697                          if (userlimit > base)
 698  698                                  slen = userlimit - base;
 699  699                          else {
 700  700                                  *addrp = NULL;
 701  701                                  return;
 702  702                          }
 703  703                  } else {
 704  704                          /*
 705  705                           * XX64 This layout is probably wrong .. but in
 706  706                           * the event we make the amd64 address space look
 707  707                           * like sparcv9 i.e. with the stack -above- the
 708  708                           * heap, this bit of code might even be correct.
 709  709                           */
 710  710                          slen = p->p_usrstack - base -
 711  711                              ((p->p_stk_ctl + PAGEOFFSET) & PAGEMASK);
 712  712                  }
 713  713          } else
 714  714  #endif
 715  715                  slen = userlimit - base;
 716  716  
 717  717          /* Make len be a multiple of PAGESIZE */
 718  718          len = (len + PAGEOFFSET) & PAGEMASK;
 719  719  
 720  720          /*
 721  721           * figure out what the alignment should be
 722  722           *
 723  723           * XX64 -- is there an ELF_AMD64_MAXPGSZ or is it the same????
 724  724           */
 725  725          if (len <= ELF_386_MAXPGSZ) {
 726  726                  /*
 727  727                   * Align virtual addresses to ensure that ELF shared libraries
 728  728                   * are mapped with the appropriate alignment constraints by
 729  729                   * the run-time linker.
 730  730                   */
 731  731                  align_amount = ELF_386_MAXPGSZ;
 732  732          } else {
 733  733                  /*
 734  734                   * For 32-bit processes, only those which have specified
 735  735                   * MAP_ALIGN and an addr will be aligned on a larger page size.
 736  736                   * Not doing so can potentially waste up to 1G of process
 737  737                   * address space.
 738  738                   */
 739  739                  int lvl = (p->p_model == DATAMODEL_ILP32) ? 1 :
 740  740                      mmu.umax_page_level;
 741  741  
 742  742                  while (lvl && len < LEVEL_SIZE(lvl))
 743  743                          --lvl;
 744  744  
 745  745                  align_amount = LEVEL_SIZE(lvl);
 746  746          }
 747  747          if ((flags & MAP_ALIGN) && ((uintptr_t)*addrp > align_amount))
 748  748                  align_amount = (uintptr_t)*addrp;
 749  749  
 750  750          ASSERT(ISP2(align_amount));
 751  751          ASSERT(align_amount == 0 || align_amount >= PAGESIZE);
 752  752  
 753  753          off = off & (align_amount - 1);
 754  754          /*
 755  755           * Look for a large enough hole starting below userlimit.
 756  756           * After finding it, use the upper part.
 757  757           */
 758  758          if (as_gap_aligned(as, len, &base, &slen, AH_HI, NULL, align_amount,
 759  759              PAGESIZE, off) == 0) {
 760  760                  caddr_t as_addr;
 761  761  
 762  762                  /*
 763  763                   * addr is the highest possible address to use since we have
 764  764                   * a PAGESIZE redzone at the beginning and end.
 765  765                   */
 766  766                  addr = base + slen - (PAGESIZE + len);
 767  767                  as_addr = addr;
 768  768                  /*
 769  769                   * Round address DOWN to the alignment amount and
 770  770                   * add the offset in.
 771  771                   * If addr is greater than as_addr, len would not be large
 772  772                   * enough to include the redzone, so we must adjust down
 773  773                   * by the alignment amount.
 774  774                   */
 775  775                  addr = (caddr_t)((uintptr_t)addr & (~(align_amount - 1)));
 776  776                  addr += (uintptr_t)off;
 777  777                  if (addr > as_addr) {
 778  778                          addr -= align_amount;
 779  779                  }
 780  780  
 781  781                  ASSERT(addr > base);
 782  782                  ASSERT(addr + len < base + slen);
 783  783                  ASSERT(((uintptr_t)addr & (align_amount - 1)) ==
 784  784                      ((uintptr_t)(off)));
 785  785                  *addrp = addr;
 786  786          } else {
 787  787                  *addrp = NULL;  /* no more virtual space */
 788  788          }
 789  789  }
 790  790  
 791  791  int valid_va_range_aligned_wraparound;
 792  792  
 793  793  /*
 794  794   * Determine whether [*basep, *basep + *lenp) contains a mappable range of
 795  795   * addresses at least "minlen" long, where the base of the range is at "off"
 796  796   * phase from an "align" boundary and there is space for a "redzone"-sized
 797  797   * redzone on either side of the range.  On success, 1 is returned and *basep
 798  798   * and *lenp are adjusted to describe the acceptable range (including
 799  799   * the redzone).  On failure, 0 is returned.
 800  800   */
 801  801  /*ARGSUSED3*/
 802  802  int
 803  803  valid_va_range_aligned(caddr_t *basep, size_t *lenp, size_t minlen, int dir,
 804  804      size_t align, size_t redzone, size_t off)
 805  805  {
 806  806          uintptr_t hi, lo;
 807  807          size_t tot_len;
 808  808  
 809  809          ASSERT(align == 0 ? off == 0 : off < align);
 810  810          ASSERT(ISP2(align));
 811  811          ASSERT(align == 0 || align >= PAGESIZE);
 812  812  
 813  813          lo = (uintptr_t)*basep;
 814  814          hi = lo + *lenp;
 815  815          tot_len = minlen + 2 * redzone; /* need at least this much space */
 816  816  
 817  817          /*
 818  818           * If hi rolled over the top, try cutting back.
 819  819           */
 820  820          if (hi < lo) {
 821  821                  *lenp = 0UL - lo - 1UL;
 822  822                  /* See if this really happens. If so, then we figure out why */
 823  823                  valid_va_range_aligned_wraparound++;
 824  824                  hi = lo + *lenp;
 825  825          }
 826  826          if (*lenp < tot_len) {
 827  827                  return (0);
 828  828          }
 829  829  
 830  830  #if defined(__amd64)
 831  831          /*
 832  832           * Deal with a possible hole in the address range between
 833  833           * hole_start and hole_end that should never be mapped.
 834  834           */
 835  835          if (lo < hole_start) {
 836  836                  if (hi > hole_start) {
 837  837                          if (hi < hole_end) {
 838  838                                  hi = hole_start;
 839  839                          } else {
 840  840                                  /* lo < hole_start && hi >= hole_end */
 841  841                                  if (dir == AH_LO) {
 842  842                                          /*
 843  843                                           * prefer lowest range
 844  844                                           */
 845  845                                          if (hole_start - lo >= tot_len)
 846  846                                                  hi = hole_start;
 847  847                                          else if (hi - hole_end >= tot_len)
 848  848                                                  lo = hole_end;
 849  849                                          else
 850  850                                                  return (0);
 851  851                                  } else {
 852  852                                          /*
 853  853                                           * prefer highest range
 854  854                                           */
 855  855                                          if (hi - hole_end >= tot_len)
 856  856                                                  lo = hole_end;
 857  857                                          else if (hole_start - lo >= tot_len)
 858  858                                                  hi = hole_start;
 859  859                                          else
 860  860                                                  return (0);
 861  861                                  }
 862  862                          }
 863  863                  }
 864  864          } else {
 865  865                  /* lo >= hole_start */
 866  866                  if (hi < hole_end)
 867  867                          return (0);
 868  868                  if (lo < hole_end)
 869  869                          lo = hole_end;
 870  870          }
 871  871  #endif
 872  872  
 873  873          if (hi - lo < tot_len)
 874  874                  return (0);
 875  875  
 876  876          if (align > 1) {
 877  877                  uintptr_t tlo = lo + redzone;
 878  878                  uintptr_t thi = hi - redzone;
 879  879                  tlo = (uintptr_t)P2PHASEUP(tlo, align, off);
 880  880                  if (tlo < lo + redzone) {
 881  881                          return (0);
 882  882                  }
 883  883                  if (thi < tlo || thi - tlo < minlen) {
 884  884                          return (0);
 885  885                  }
 886  886          }
 887  887  
 888  888          *basep = (caddr_t)lo;
 889  889          *lenp = hi - lo;
 890  890          return (1);
 891  891  }
 892  892  
 893  893  /*
 894  894   * Determine whether [*basep, *basep + *lenp) contains a mappable range of
 895  895   * addresses at least "minlen" long.  On success, 1 is returned and *basep
 896  896   * and *lenp are adjusted to describe the acceptable range.  On failure, 0
 897  897   * is returned.
 898  898   */
 899  899  int
 900  900  valid_va_range(caddr_t *basep, size_t *lenp, size_t minlen, int dir)
 901  901  {
 902  902          return (valid_va_range_aligned(basep, lenp, minlen, dir, 0, 0, 0));
 903  903  }
 904  904  
 905  905  /*
 906  906   * Determine whether [addr, addr+len] are valid user addresses.
 907  907   */
 908  908  /*ARGSUSED*/
 909  909  int
 910  910  valid_usr_range(caddr_t addr, size_t len, uint_t prot, struct as *as,
 911  911      caddr_t userlimit)
 912  912  {
 913  913          caddr_t eaddr = addr + len;
 914  914  
 915  915          if (eaddr <= addr || addr >= userlimit || eaddr > userlimit)
 916  916                  return (RANGE_BADADDR);
 917  917  
 918  918  #if defined(__amd64)
 919  919          /*
 920  920           * Check for the VA hole
 921  921           */
 922  922          if (eaddr > (caddr_t)hole_start && addr < (caddr_t)hole_end)
 923  923                  return (RANGE_BADADDR);
 924  924  #endif
 925  925  
 926  926          return (RANGE_OKAY);
 927  927  }
 928  928  
 929  929  /*
 930  930   * Return 1 if the page frame is onboard memory, else 0.
 931  931   */
 932  932  int
 933  933  pf_is_memory(pfn_t pf)
 934  934  {
 935  935          if (pfn_is_foreign(pf))
 936  936                  return (0);
 937  937          return (address_in_memlist(phys_install, pfn_to_pa(pf), 1));
 938  938  }
 939  939  
 940  940  /*
 941  941   * return the memrange containing pfn
 942  942   */
 943  943  int
 944  944  memrange_num(pfn_t pfn)
 945  945  {
 946  946          int n;
 947  947  
 948  948          for (n = 0; n < nranges - 1; ++n) {
 949  949                  if (pfn >= memranges[n])
 950  950                          break;
 951  951          }
 952  952          return (n);
 953  953  }
 954  954  
 955  955  /*
 956  956   * return the mnoderange containing pfn
 957  957   */
 958  958  /*ARGSUSED*/
 959  959  int
 960  960  pfn_2_mtype(pfn_t pfn)
 961  961  {
 962  962  #if defined(__xpv)
 963  963          return (0);
 964  964  #else
 965  965          int     n;
 966  966  
 967  967          /* Always start from highest pfn and work our way down */
 968  968          for (n = mtypetop; n != -1; n = mnoderanges[n].mnr_next) {
 969  969                  if (pfn >= mnoderanges[n].mnr_pfnlo) {
 970  970                          break;
 971  971                  }
 972  972          }
 973  973          return (n);
 974  974  #endif
 975  975  }
 976  976  
 977  977  #if !defined(__xpv)
 978  978  /*
 979  979   * is_contigpage_free:
 980  980   *      returns a page list of contiguous pages. It minimally has to return
 981  981   *      minctg pages. Caller determines minctg based on the scatter-gather
 982  982   *      list length.
 983  983   *
 984  984   *      pfnp is set to the next page frame to search on return.
 985  985   */
 986  986  static page_t *
 987  987  is_contigpage_free(
 988  988          pfn_t *pfnp,
 989  989          pgcnt_t *pgcnt,
 990  990          pgcnt_t minctg,
 991  991          uint64_t pfnseg,
 992  992          int iolock)
 993  993  {
 994  994          int     i = 0;
 995  995          pfn_t   pfn = *pfnp;
 996  996          page_t  *pp;
 997  997          page_t  *plist = NULL;
 998  998  
 999  999          /*
1000 1000           * fail if pfn + minctg crosses a segment boundary.
 1001 1001           * Adjust the next starting pfn to begin at the segment boundary.
1002 1002           */
1003 1003  
1004 1004          if (((*pfnp + minctg - 1) & pfnseg) < (*pfnp & pfnseg)) {
1005 1005                  *pfnp = roundup(*pfnp, pfnseg + 1);
1006 1006                  return (NULL);
1007 1007          }
1008 1008  
1009 1009          do {
1010 1010  retry:
1011 1011                  pp = page_numtopp_nolock(pfn + i);
1012 1012                  if ((pp == NULL) || IS_DUMP_PAGE(pp) ||
1013 1013                      (page_trylock(pp, SE_EXCL) == 0)) {
1014 1014                          (*pfnp)++;
1015 1015                          break;
1016 1016                  }
1017 1017                  if (page_pptonum(pp) != pfn + i) {
1018 1018                          page_unlock(pp);
1019 1019                          goto retry;
1020 1020                  }
1021 1021  
1022 1022                  if (!(PP_ISFREE(pp))) {
1023 1023                          page_unlock(pp);
1024 1024                          (*pfnp)++;
1025 1025                          break;
1026 1026                  }
1027 1027  
1028 1028                  if (!PP_ISAGED(pp)) {
1029 1029                          page_list_sub(pp, PG_CACHE_LIST);
1030 1030                          page_hashout(pp, (kmutex_t *)NULL);
1031 1031                  } else {
1032 1032                          page_list_sub(pp, PG_FREE_LIST);
1033 1033                  }
1034 1034  
1035 1035                  if (iolock)
1036 1036                          page_io_lock(pp);
1037 1037                  page_list_concat(&plist, &pp);
1038 1038  
1039 1039                  /*
1040 1040                   * exit loop when pgcnt satisfied or segment boundary reached.
1041 1041                   */
1042 1042  
1043 1043          } while ((++i < *pgcnt) && ((pfn + i) & pfnseg));
1044 1044  
1045 1045          *pfnp += i;             /* set to next pfn to search */
1046 1046  
1047 1047          if (i >= minctg) {
1048 1048                  *pgcnt -= i;
1049 1049                  return (plist);
1050 1050          }
1051 1051  
1052 1052          /*
1053 1053           * failure: minctg not satisfied.
1054 1054           *
1055 1055           * if next request crosses segment boundary, set next pfn
1056 1056           * to search from the segment boundary.
1057 1057           */
1058 1058          if (((*pfnp + minctg - 1) & pfnseg) < (*pfnp & pfnseg))
1059 1059                  *pfnp = roundup(*pfnp, pfnseg + 1);
1060 1060  
1061 1061          /* clean up any pages already allocated */
1062 1062  
1063 1063          while (plist) {
1064 1064                  pp = plist;
1065 1065                  page_sub(&plist, pp);
1066 1066                  page_list_add(pp, PG_FREE_LIST | PG_LIST_TAIL);
1067 1067                  if (iolock)
1068 1068                          page_io_unlock(pp);
1069 1069                  page_unlock(pp);
1070 1070          }
1071 1071  
1072 1072          return (NULL);
1073 1073  }
1074 1074  #endif  /* !__xpv */
1075 1075  
1076 1076  /*
1077 1077   * verify that pages being returned from allocator have correct DMA attribute
1078 1078   */
1079 1079  #ifndef DEBUG
1080 1080  #define check_dma(a, b, c) (void)(0)
1081 1081  #else
1082 1082  static void
1083 1083  check_dma(ddi_dma_attr_t *dma_attr, page_t *pp, int cnt)
1084 1084  {
1085 1085          if (dma_attr == NULL)
1086 1086                  return;
1087 1087  
1088 1088          while (cnt-- > 0) {
1089 1089                  if (pa_to_ma(pfn_to_pa(pp->p_pagenum)) <
1090 1090                      dma_attr->dma_attr_addr_lo)
1091 1091                          panic("PFN (pp=%p) below dma_attr_addr_lo", (void *)pp);
1092 1092                  if (pa_to_ma(pfn_to_pa(pp->p_pagenum)) >=
1093 1093                      dma_attr->dma_attr_addr_hi)
1094 1094                          panic("PFN (pp=%p) above dma_attr_addr_hi", (void *)pp);
1095 1095                  pp = pp->p_next;
1096 1096          }
1097 1097  }
1098 1098  #endif
1099 1099  
1100 1100  #if !defined(__xpv)
1101 1101  static page_t *
1102 1102  page_get_contigpage(pgcnt_t *pgcnt, ddi_dma_attr_t *mattr, int iolock)
1103 1103  {
1104 1104          pfn_t           pfn;
1105 1105          int             sgllen;
1106 1106          uint64_t        pfnseg;
1107 1107          pgcnt_t         minctg;
1108 1108          page_t          *pplist = NULL, *plist;
1109 1109          uint64_t        lo, hi;
1110 1110          pgcnt_t         pfnalign = 0;
1111 1111          static pfn_t    startpfn;
1112 1112          static pgcnt_t  lastctgcnt;
1113 1113          uintptr_t       align;
1114 1114  
1115 1115          CONTIG_LOCK();
1116 1116  
1117 1117          if (mattr) {
1118 1118                  lo = mmu_btop((mattr->dma_attr_addr_lo + MMU_PAGEOFFSET));
1119 1119                  hi = mmu_btop(mattr->dma_attr_addr_hi);
1120 1120                  if (hi >= physmax)
1121 1121                          hi = physmax - 1;
1122 1122                  sgllen = mattr->dma_attr_sgllen;
1123 1123                  pfnseg = mmu_btop(mattr->dma_attr_seg);
1124 1124  
1125 1125                  align = maxbit(mattr->dma_attr_align, mattr->dma_attr_minxfer);
1126 1126                  if (align > MMU_PAGESIZE)
1127 1127                          pfnalign = mmu_btop(align);
1128 1128  
1129 1129                  /*
 1130 1130                   * in order to satisfy the request, we must minimally
1131 1131                   * acquire minctg contiguous pages
1132 1132                   */
1133 1133                  minctg = howmany(*pgcnt, sgllen);
1134 1134  
1135 1135                  ASSERT(hi >= lo);
1136 1136  
1137 1137                  /*
1138 1138                   * start from where last searched if the minctg >= lastctgcnt
1139 1139                   */
1140 1140                  if (minctg < lastctgcnt || startpfn < lo || startpfn > hi)
1141 1141                          startpfn = lo;
1142 1142          } else {
1143 1143                  hi = physmax - 1;
1144 1144                  lo = 0;
1145 1145                  sgllen = 1;
1146 1146                  pfnseg = mmu.highest_pfn;
1147 1147                  minctg = *pgcnt;
1148 1148  
1149 1149                  if (minctg < lastctgcnt)
1150 1150                          startpfn = lo;
1151 1151          }
1152 1152          lastctgcnt = minctg;
1153 1153  
1154 1154          ASSERT(pfnseg + 1 >= (uint64_t)minctg);
1155 1155  
1156 1156          /* conserve 16m memory - start search above 16m when possible */
1157 1157          if (hi > PFN_16M && startpfn < PFN_16M)
1158 1158                  startpfn = PFN_16M;
1159 1159  
1160 1160          pfn = startpfn;
1161 1161          if (pfnalign)
1162 1162                  pfn = P2ROUNDUP(pfn, pfnalign);
1163 1163  
1164 1164          while (pfn + minctg - 1 <= hi) {
1165 1165  
1166 1166                  plist = is_contigpage_free(&pfn, pgcnt, minctg, pfnseg, iolock);
1167 1167                  if (plist) {
1168 1168                          page_list_concat(&pplist, &plist);
1169 1169                          sgllen--;
1170 1170                          /*
1171 1171                           * return when contig pages no longer needed
1172 1172                           */
1173 1173                          if (!*pgcnt || ((*pgcnt <= sgllen) && !pfnalign)) {
1174 1174                                  startpfn = pfn;
1175 1175                                  CONTIG_UNLOCK();
1176 1176                                  check_dma(mattr, pplist, *pgcnt);
1177 1177                                  return (pplist);
1178 1178                          }
1179 1179                          minctg = howmany(*pgcnt, sgllen);
1180 1180                  }
1181 1181                  if (pfnalign)
1182 1182                          pfn = P2ROUNDUP(pfn, pfnalign);
1183 1183          }
1184 1184  
1185 1185          /* cannot find contig pages in specified range */
1186 1186          if (startpfn == lo) {
1187 1187                  CONTIG_UNLOCK();
1188 1188                  return (NULL);
1189 1189          }
1190 1190  
1191 1191          /* did not start with lo previously */
1192 1192          pfn = lo;
1193 1193          if (pfnalign)
1194 1194                  pfn = P2ROUNDUP(pfn, pfnalign);
1195 1195  
1196 1196          /* allow search to go above startpfn */
1197 1197          while (pfn < startpfn) {
1198 1198  
1199 1199                  plist = is_contigpage_free(&pfn, pgcnt, minctg, pfnseg, iolock);
1200 1200                  if (plist != NULL) {
1201 1201  
1202 1202                          page_list_concat(&pplist, &plist);
1203 1203                          sgllen--;
1204 1204  
1205 1205                          /*
1206 1206                           * return when contig pages no longer needed
1207 1207                           */
1208 1208                          if (!*pgcnt || ((*pgcnt <= sgllen) && !pfnalign)) {
1209 1209                                  startpfn = pfn;
1210 1210                                  CONTIG_UNLOCK();
1211 1211                                  check_dma(mattr, pplist, *pgcnt);
1212 1212                                  return (pplist);
1213 1213                          }
1214 1214                          minctg = howmany(*pgcnt, sgllen);
1215 1215                  }
1216 1216                  if (pfnalign)
1217 1217                          pfn = P2ROUNDUP(pfn, pfnalign);
1218 1218          }
1219 1219          CONTIG_UNLOCK();
1220 1220          return (NULL);
1221 1221  }
1222 1222  #endif  /* !__xpv */
1223 1223  
1224 1224  /*
1225 1225   * mnode_range_cnt() calculates the number of memory ranges for mnode and
1226 1226   * memranges[]. Used to determine the size of page lists and mnoderanges.
1227 1227   */
1228 1228  int
1229 1229  mnode_range_cnt(int mnode)
1230 1230  {
1231 1231  #if defined(__xpv)
1232 1232          ASSERT(mnode == 0);
1233 1233          return (1);
1234 1234  #else   /* __xpv */
1235 1235          int     mri;
1236 1236          int     mnrcnt = 0;
1237 1237  
1238 1238          if (mem_node_config[mnode].exists != 0) {
1239 1239                  mri = nranges - 1;
1240 1240  
 1241 1241                  /* find the memranges index containing the base of the mnode range */
1242 1242  
1243 1243                  while (MEMRANGEHI(mri) < mem_node_config[mnode].physbase)
1244 1244                          mri--;
1245 1245  
1246 1246                  /*
1247 1247                   * increment mnode range counter when memranges or mnode
1248 1248                   * boundary is reached.
1249 1249                   */
1250 1250                  while (mri >= 0 &&
1251 1251                      mem_node_config[mnode].physmax >= MEMRANGELO(mri)) {
1252 1252                          mnrcnt++;
1253 1253                          if (mem_node_config[mnode].physmax > MEMRANGEHI(mri))
1254 1254                                  mri--;
1255 1255                          else
1256 1256                                  break;
1257 1257                  }
1258 1258          }
1259 1259          ASSERT(mnrcnt <= MAX_MNODE_MRANGES);
1260 1260          return (mnrcnt);
1261 1261  #endif  /* __xpv */
1262 1262  }
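
            /*
             * Worked example (illustrative): an mnode spanning pfns 0x800
             * (8M) through 0x180000 (6G) overlaps all four of the default
             * arch_memranges -- MRI_0, MRI_16M, MRI_2G and MRI_4G -- so
             * mnode_range_cnt() returns 4 for it; a machine with only 1G
             * of memory in a single mnode would instead yield 2 (MRI_0 and
             * MRI_16M).
             */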
1263 1263  
1264 1264  /*
1265 1265   * mnode_range_setup() initializes mnoderanges.
1266 1266   */
1267 1267  void
1268 1268  mnode_range_setup(mnoderange_t *mnoderanges)
1269 1269  {
1270 1270          mnoderange_t *mp = mnoderanges;
1271 1271          int     mnode, mri;
1272 1272          int     mindex = 0;     /* current index into mnoderanges array */
1273 1273          int     i, j;
1274 1274          pfn_t   hipfn;
1275 1275          int     last, hi;
1276 1276  
1277 1277          for (mnode = 0; mnode < max_mem_nodes; mnode++) {
1278 1278                  if (mem_node_config[mnode].exists == 0)
1279 1279                          continue;
1280 1280  
1281 1281                  mri = nranges - 1;
1282 1282  
1283 1283                  while (MEMRANGEHI(mri) < mem_node_config[mnode].physbase)
1284 1284                          mri--;
1285 1285  
1286 1286                  while (mri >= 0 && mem_node_config[mnode].physmax >=
1287 1287                      MEMRANGELO(mri)) {
1288 1288                          mnoderanges->mnr_pfnlo = MAX(MEMRANGELO(mri),
1289 1289                              mem_node_config[mnode].physbase);
1290 1290                          mnoderanges->mnr_pfnhi = MIN(MEMRANGEHI(mri),
1291 1291                              mem_node_config[mnode].physmax);
1292 1292                          mnoderanges->mnr_mnode = mnode;
1293 1293                          mnoderanges->mnr_memrange = mri;
1294 1294                          mnoderanges->mnr_exists = 1;
1295 1295                          mnoderanges++;
1296 1296                          mindex++;
1297 1297                          if (mem_node_config[mnode].physmax > MEMRANGEHI(mri))
1298 1298                                  mri--;
1299 1299                          else
1300 1300                                  break;
1301 1301                  }
1302 1302          }
1303 1303  
1304 1304          /*
1305 1305           * For now do a simple sort of the mnoderanges array to fill in
1306 1306           * the mnr_next fields.  Since mindex is expected to be relatively
 1307 1307           * small, we use a simple O(N^2) algorithm.
1308 1308           */
1309 1309          for (i = 0; i < mindex; i++) {
1310 1310                  if (mp[i].mnr_pfnlo == 0)       /* find lowest */
1311 1311                          break;
1312 1312          }
1313 1313          ASSERT(i < mindex);
1314 1314          last = i;
1315 1315          mtype16m = last;
1316 1316          mp[last].mnr_next = -1;
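                   /*
                    * Each pass below finds the range with the next higher
                    * mnr_pfnlo and points its mnr_next at the previous one,
                    * so following mnr_next from mtypetop visits the ranges
                    * in descending pfn order, ending at the range that
                    * holds page 0 (mtype16m).
                    */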
1317 1317          for (i = 0; i < mindex - 1; i++) {
1318 1318                  hipfn = (pfn_t)(-1);
1319 1319                  hi = -1;
1320 1320                  /* find next highest mnode range */
1321 1321                  for (j = 0; j < mindex; j++) {
1322 1322                          if (mp[j].mnr_pfnlo > mp[last].mnr_pfnlo &&
1323 1323                              mp[j].mnr_pfnlo < hipfn) {
1324 1324                                  hipfn = mp[j].mnr_pfnlo;
1325 1325                                  hi = j;
1326 1326                          }
1327 1327                  }
1328 1328                  mp[hi].mnr_next = last;
1329 1329                  last = hi;
1330 1330          }
1331 1331          mtypetop = last;
1332 1332  }
1333 1333  
1334 1334  #ifndef __xpv
1335 1335  /*
1336 1336   * Update mnoderanges for memory hot-add DR operations.
1337 1337   */
1338 1338  static void
1339 1339  mnode_range_add(int mnode)
1340 1340  {
1341 1341          int     *prev;
1342 1342          int     n, mri;
1343 1343          pfn_t   start, end;
1344 1344          extern  void membar_sync(void);
1345 1345  
1346 1346          ASSERT(0 <= mnode && mnode < max_mem_nodes);
1347 1347          ASSERT(mem_node_config[mnode].exists);
1348 1348          start = mem_node_config[mnode].physbase;
1349 1349          end = mem_node_config[mnode].physmax;
1350 1350          ASSERT(start <= end);
1351 1351          mutex_enter(&mnoderange_lock);
1352 1352  
1353 1353  #ifdef  DEBUG
1354 1354          /* Check whether it interleaves with other memory nodes. */
1355 1355          for (n = mtypetop; n != -1; n = mnoderanges[n].mnr_next) {
1356 1356                  ASSERT(mnoderanges[n].mnr_exists);
1357 1357                  if (mnoderanges[n].mnr_mnode == mnode)
1358 1358                          continue;
1359 1359                  ASSERT(start > mnoderanges[n].mnr_pfnhi ||
1360 1360                      end < mnoderanges[n].mnr_pfnlo);
1361 1361          }
1362 1362  #endif  /* DEBUG */
1363 1363  
1364 1364          mri = nranges - 1;
1365 1365          while (MEMRANGEHI(mri) < mem_node_config[mnode].physbase)
1366 1366                  mri--;
1367 1367          while (mri >= 0 && mem_node_config[mnode].physmax >= MEMRANGELO(mri)) {
1368 1368                  /* Check whether mtype already exists. */
1369 1369                  for (n = mtypetop; n != -1; n = mnoderanges[n].mnr_next) {
1370 1370                          if (mnoderanges[n].mnr_mnode == mnode &&
1371 1371                              mnoderanges[n].mnr_memrange == mri) {
1372 1372                                  mnoderanges[n].mnr_pfnlo = MAX(MEMRANGELO(mri),
1373 1373                                      start);
1374 1374                                  mnoderanges[n].mnr_pfnhi = MIN(MEMRANGEHI(mri),
1375 1375                                      end);
1376 1376                                  break;
1377 1377                          }
1378 1378                  }
1379 1379  
1380 1380                  /* Add a new entry if it doesn't exist yet. */
1381 1381                  if (n == -1) {
1382 1382                          /* Try to find an unused entry in mnoderanges array. */
1383 1383                          for (n = 0; n < mnoderangecnt; n++) {
1384 1384                                  if (mnoderanges[n].mnr_exists == 0)
1385 1385                                          break;
1386 1386                          }
1387 1387                          ASSERT(n < mnoderangecnt);
1388 1388                          mnoderanges[n].mnr_pfnlo = MAX(MEMRANGELO(mri), start);
1389 1389                          mnoderanges[n].mnr_pfnhi = MIN(MEMRANGEHI(mri), end);
1390 1390                          mnoderanges[n].mnr_mnode = mnode;
1391 1391                          mnoderanges[n].mnr_memrange = mri;
1392 1392                          mnoderanges[n].mnr_exists = 1;
1393 1393                          /* Page 0 should always be present. */
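                                   /*
                                    * The mnr_next chain is ordered by
                                    * descending mnr_pfnlo, so walk it until
                                    * an entry with mnr_pfnlo <= start is
                                    * found and link the new range in ahead
                                    * of that entry.  The range holding page
                                    * 0 guarantees the walk terminates.
                                    */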
1394 1394                          for (prev = &mtypetop;
1395 1395                              mnoderanges[*prev].mnr_pfnlo > start;
1396 1396                              prev = &mnoderanges[*prev].mnr_next) {
1397 1397                                  ASSERT(mnoderanges[*prev].mnr_next >= 0);
1398 1398                                  ASSERT(mnoderanges[*prev].mnr_pfnlo > end);
1399 1399                          }
1400 1400                          mnoderanges[n].mnr_next = *prev;
1401 1401                          membar_sync();
1402 1402                          *prev = n;
1403 1403                  }
1404 1404  
1405 1405                  if (mem_node_config[mnode].physmax > MEMRANGEHI(mri))
1406 1406                          mri--;
1407 1407                  else
1408 1408                          break;
1409 1409          }
1410 1410  
1411 1411          mutex_exit(&mnoderange_lock);
1412 1412  }
1413 1413  
1414 1414  /*
1415 1415   * Update mnoderanges for memory hot-removal DR operations.
1416 1416   */
1417 1417  static void
1418 1418  mnode_range_del(int mnode)
1419 1419  {
1420 1420          _NOTE(ARGUNUSED(mnode));
1421 1421          ASSERT(0 <= mnode && mnode < max_mem_nodes);
1422 1422          /* TODO: support deletion operation. */
1423 1423          ASSERT(0);
1424 1424  }
1425 1425  
1426 1426  void
1427 1427  plat_slice_add(pfn_t start, pfn_t end)
1428 1428  {
1429 1429          mem_node_add_slice(start, end);
1430 1430          if (plat_dr_enabled()) {
1431 1431                  mnode_range_add(PFN_2_MEM_NODE(start));
1432 1432          }
1433 1433  }
1434 1434  
1435 1435  void
1436 1436  plat_slice_del(pfn_t start, pfn_t end)
1437 1437  {
1438 1438          ASSERT(PFN_2_MEM_NODE(start) == PFN_2_MEM_NODE(end));
1439 1439          ASSERT(plat_dr_enabled());
1440 1440          mnode_range_del(PFN_2_MEM_NODE(start));
1441 1441          mem_node_del_slice(start, end);
1442 1442  }
1443 1443  #endif  /* __xpv */
1444 1444  
1445 1445  /*ARGSUSED*/
1446 1446  int
1447 1447  mtype_init(vnode_t *vp, caddr_t vaddr, uint_t *flags, size_t pgsz)
1448 1448  {
1449 1449          int mtype = mtypetop;
1450 1450  
1451 1451  #if !defined(__xpv)
1452 1452  #if defined(__i386)
1453 1453          /*
1454 1454           * set the mtype range
1455 1455           * - kmem requests need to be below 4g if restricted_kmemalloc is set.
1456 1456           * - for non kmem requests, set range to above 4g if memory below 4g
1457 1457           * runs low.
1458 1458           */
1459 1459          if (restricted_kmemalloc && VN_ISKAS(vp) &&
1460 1460              (caddr_t)(vaddr) >= kernelheap &&
1461 1461              (caddr_t)(vaddr) < ekernelheap) {
1462 1462                  ASSERT(physmax4g);
1463 1463                  mtype = mtype4g;
1464 1464                  if (RESTRICT16M_ALLOC(freemem4g - btop(pgsz),
1465 1465                      btop(pgsz), *flags)) {
1466 1466                          *flags |= PGI_MT_RANGE16M;
1467 1467                  } else {
1468 1468                          VM_STAT_ADD(vmm_vmstats.unrestrict16mcnt);
1469 1469                          VM_STAT_COND_ADD((*flags & PG_PANIC),
1470 1470                              vmm_vmstats.pgpanicalloc);
1471 1471                          *flags |= PGI_MT_RANGE0;
1472 1472                  }
1473 1473                  return (mtype);
1474 1474          }
1475 1475  #endif  /* __i386 */
1476 1476  
1477 1477          if (RESTRICT4G_ALLOC) {
1478 1478                  VM_STAT_ADD(vmm_vmstats.restrict4gcnt);
1479 1479                  /* here only for > 4g systems */
1480 1480                  *flags |= PGI_MT_RANGE4G;
1481 1481          } else if (RESTRICT16M_ALLOC(freemem, btop(pgsz), *flags)) {
1482 1482                  *flags |= PGI_MT_RANGE16M;
1483 1483          } else {
1484 1484                  VM_STAT_ADD(vmm_vmstats.unrestrict16mcnt);
1485 1485                  VM_STAT_COND_ADD((*flags & PG_PANIC), vmm_vmstats.pgpanicalloc);
1486 1486                  *flags |= PGI_MT_RANGE0;
1487 1487          }
1488 1488  #endif /* !__xpv */
1489 1489          return (mtype);
1490 1490  }
1491 1491  
1492 1492  
1493 1493  /* mtype init for page_get_replacement_page */
1494 1494  /*ARGSUSED*/
1495 1495  int
1496 1496  mtype_pgr_init(int *flags, page_t *pp, int mnode, pgcnt_t pgcnt)
1497 1497  {
1498 1498          int mtype = mtypetop;
1499 1499  #if !defined(__xpv)
1500 1500          if (RESTRICT16M_ALLOC(freemem, pgcnt, *flags)) {
1501 1501                  *flags |= PGI_MT_RANGE16M;
1502 1502          } else {
1503 1503                  VM_STAT_ADD(vmm_vmstats.unrestrict16mcnt);
1504 1504                  *flags |= PGI_MT_RANGE0;
1505 1505          }
1506 1506  #endif
1507 1507          return (mtype);
1508 1508  }
1509 1509  
1510 1510  /*
1511 1511   * Determine if the mnode range specified in mtype contains memory belonging
1512 1512   * to memory node mnode.  If flags & PGI_MT_RANGE is set then mtype contains
1513 1513   * the range from high pfn to 0, 16m or 4g.
1514 1514   *
1515 1515   * Return first mnode range type index found otherwise return -1 if none found.
1516 1516   */
1517 1517  int
1518 1518  mtype_func(int mnode, int mtype, uint_t flags)
1519 1519  {
1520 1520          if (flags & PGI_MT_RANGE) {
1521 1521                  int     mnr_lim = MRI_0;
1522 1522  
1523 1523                  if (flags & PGI_MT_NEXT) {
1524 1524                          mtype = mnoderanges[mtype].mnr_next;
1525 1525                  }
1526 1526                  if (flags & PGI_MT_RANGE4G)
1527 1527                          mnr_lim = MRI_4G;       /* exclude 0-4g range */
1528 1528                  else if (flags & PGI_MT_RANGE16M)
1529 1529                          mnr_lim = MRI_16M;      /* exclude 0-16m range */
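                           /*
                            * Walk the mnr_next chain from high pfns toward
                            * low, stopping once a range falls inside the
                            * excluded low region, and return the first
                            * entry that belongs to mnode.
                            */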
1530 1530                  while (mtype != -1 &&
1531 1531                      mnoderanges[mtype].mnr_memrange <= mnr_lim) {
1532 1532                          if (mnoderanges[mtype].mnr_mnode == mnode)
1533 1533                                  return (mtype);
1534 1534                          mtype = mnoderanges[mtype].mnr_next;
1535 1535                  }
1536 1536          } else if (mnoderanges[mtype].mnr_mnode == mnode) {
1537 1537                  return (mtype);
1538 1538          }
1539 1539          return (-1);
1540 1540  }
1541 1541  
1542 1542  /*
1543 1543   * Update the page list max counts with the pfn range specified by the
1544 1544   * input parameters.
1545 1545   */
1546 1546  void
1547 1547  mtype_modify_max(pfn_t startpfn, long cnt)
1548 1548  {
1549 1549          int             mtype;
1550 1550          pgcnt_t         inc;
1551 1551          spgcnt_t        scnt = (spgcnt_t)(cnt);
1552 1552          pgcnt_t         acnt = ABS(scnt);
1553 1553          pfn_t           endpfn = startpfn + acnt;
1554 1554          pfn_t           pfn, lo;
1555 1555  
1556 1556          if (!physmax4g)
1557 1557                  return;
1558 1558  
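                   /*
                    * Walk the ranges from the top down; for each range
                    * outside the MRI_4G memrange, adjust maxmem4g by the
                    * number of pages of [startpfn, endpfn) that fall within
                    * that range (the sign of cnt selects add or subtract).
                    */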
1559 1559          mtype = mtypetop;
1560 1560          for (pfn = endpfn; pfn > startpfn; ) {
1561 1561                  ASSERT(mtype != -1);
1562 1562                  lo = mnoderanges[mtype].mnr_pfnlo;
1563 1563                  if (pfn > lo) {
1564 1564                          if (startpfn >= lo) {
1565 1565                                  inc = pfn - startpfn;
1566 1566                          } else {
1567 1567                                  inc = pfn - lo;
1568 1568                          }
1569 1569                          if (mnoderanges[mtype].mnr_memrange != MRI_4G) {
1570 1570                                  if (scnt > 0)
1571 1571                                          maxmem4g += inc;
1572 1572                                  else
1573 1573                                          maxmem4g -= inc;
1574 1574                          }
1575 1575                          pfn -= inc;
1576 1576                  }
1577 1577                  mtype = mnoderanges[mtype].mnr_next;
1578 1578          }
1579 1579  }
1580 1580  
1581 1581  int
1582 1582  mtype_2_mrange(int mtype)
1583 1583  {
1584 1584          return (mnoderanges[mtype].mnr_memrange);
1585 1585  }
1586 1586  
1587 1587  void
1588 1588  mnodetype_2_pfn(int mnode, int mtype, pfn_t *pfnlo, pfn_t *pfnhi)
1589 1589  {
1590 1590          _NOTE(ARGUNUSED(mnode));
1591 1591          ASSERT(mnoderanges[mtype].mnr_mnode == mnode);
1592 1592          *pfnlo = mnoderanges[mtype].mnr_pfnlo;
1593 1593          *pfnhi = mnoderanges[mtype].mnr_pfnhi;
1594 1594  }
1595 1595  
1596 1596  size_t
1597 1597  plcnt_sz(size_t ctrs_sz)
1598 1598  {
1599 1599  #ifdef DEBUG
1600 1600          int     szc, colors;
1601 1601  
1602 1602          ctrs_sz += mnoderangecnt * sizeof (struct mnr_mts) * mmu_page_sizes;
1603 1603          for (szc = 0; szc < mmu_page_sizes; szc++) {
1604 1604                  colors = page_get_pagecolors(szc);
1605 1605                  ctrs_sz += mnoderangecnt * sizeof (pgcnt_t) * colors;
1606 1606          }
1607 1607  #endif
1608 1608          return (ctrs_sz);
1609 1609  }
1610 1610  
1611 1611  caddr_t
1612 1612  plcnt_init(caddr_t addr)
1613 1613  {
1614 1614  #ifdef DEBUG
1615 1615          int     mt, szc, colors;
1616 1616  
1617 1617          for (mt = 0; mt < mnoderangecnt; mt++) {
1618 1618                  mnoderanges[mt].mnr_mts = (struct mnr_mts *)addr;
1619 1619                  addr += (sizeof (struct mnr_mts) * mmu_page_sizes);
1620 1620                  for (szc = 0; szc < mmu_page_sizes; szc++) {
1621 1621                          colors = page_get_pagecolors(szc);
1622 1622                          mnoderanges[mt].mnr_mts[szc].mnr_mts_colors = colors;
1623 1623                          mnoderanges[mt].mnr_mts[szc].mnr_mtsc_pgcnt =
1624 1624                              (pgcnt_t *)addr;
1625 1625                          addr += (sizeof (pgcnt_t) * colors);
1626 1626                  }
1627 1627          }
1628 1628  #endif
1629 1629          return (addr);
1630 1630  }
1631 1631  
1632 1632  void
1633 1633  plcnt_inc_dec(page_t *pp, int mtype, int szc, long cnt, int flags)
1634 1634  {
1635 1635          _NOTE(ARGUNUSED(pp));
1636 1636  #ifdef DEBUG
1637 1637          int     bin = PP_2_BIN(pp);
1638 1638  
1639 1639          atomic_add_long(&mnoderanges[mtype].mnr_mts[szc].mnr_mts_pgcnt, cnt);
1640 1640          atomic_add_long(&mnoderanges[mtype].mnr_mts[szc].mnr_mtsc_pgcnt[bin],
1641 1641              cnt);
1642 1642  #endif
1643 1643          ASSERT(mtype == PP_2_MTYPE(pp));
1644 1644          if (physmax4g && mnoderanges[mtype].mnr_memrange != MRI_4G)
1645 1645                  atomic_add_long(&freemem4g, cnt);
1646 1646          if (flags & PG_CACHE_LIST)
1647 1647                  atomic_add_long(&mnoderanges[mtype].mnr_mt_clpgcnt, cnt);
1648 1648          else
1649 1649                  atomic_add_long(&mnoderanges[mtype].mnr_mt_flpgcnt[szc], cnt);
1650 1650          atomic_add_long(&mnoderanges[mtype].mnr_mt_totcnt, cnt);
1651 1651  }
1652 1652  
1653 1653  /*
1654 1654   * Returns the free page count for mnode
1655 1655   */
1656 1656  int
1657 1657  mnode_pgcnt(int mnode)
1658 1658  {
1659 1659          int     mtype = mtypetop;
1660 1660          int     flags = PGI_MT_RANGE0;
1661 1661          pgcnt_t pgcnt = 0;
1662 1662  
1663 1663          mtype = mtype_func(mnode, mtype, flags);
1664 1664  
1665 1665          while (mtype != -1) {
1666 1666                  pgcnt += MTYPE_FREEMEM(mtype);
1667 1667                  mtype = mtype_func(mnode, mtype, flags | PGI_MT_NEXT);
1668 1668          }
1669 1669          return (pgcnt);
1670 1670  }
1671 1671  
1672 1672  /*
1673 1673   * Initialize page coloring variables based on the l2 cache parameters.
1674 1674   * Calculate and return memory needed for page coloring data structures.
1675 1675   */
1676 1676  size_t
1677 1677  page_coloring_init(uint_t l2_sz, int l2_linesz, int l2_assoc)
1678 1678  {
1679 1679          _NOTE(ARGUNUSED(l2_linesz));
1680 1680          size_t  colorsz = 0;
1681 1681          int     i;
1682 1682          int     colors;
1683 1683  
1684 1684  #if defined(__xpv)
1685 1685          /*
1686 1686           * Hypervisor domains currently don't have any concept of NUMA.
1687 1687           * Hence we'll act like there is only 1 memrange.
1688 1688           */
1689 1689          i = memrange_num(1);
1690 1690  #else /* !__xpv */
1691 1691          /*
1692 1692           * Reduce the memory ranges lists if we don't have large amounts
1693 1693           * of memory. This avoids searching known empty free lists.
1694 1694           * To support memory DR operations, we need to keep memory ranges
1695 1695           * for possible memory hot-add operations.
1696 1696           */
1697 1697          if (plat_dr_physmax > physmax)
1698 1698                  i = memrange_num(plat_dr_physmax);
1699 1699          else
1700 1700                  i = memrange_num(physmax);
1701 1701  #if defined(__i386)
1702 1702          if (i > MRI_4G)
1703 1703                  restricted_kmemalloc = 0;
1704 1704  #endif
1705 1705          /* physmax greater than 4g */
1706 1706          if (i == MRI_4G)
1707 1707                  physmax4g = 1;
1708 1708  #endif /* !__xpv */
1709 1709          memranges += i;
1710 1710          nranges -= i;
1711 1711  
1712 1712          ASSERT(mmu_page_sizes <= MMU_PAGE_SIZES);
1713 1713  
1714 1714          ASSERT(ISP2(l2_linesz));
1715 1715          ASSERT(l2_sz > MMU_PAGESIZE);
1716 1716  
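                   /*
                    * The number of l2 colors is the number of page-sized
                    * slices in one cache way, i.e. (l2 size / associativity)
                    * divided by the page size.
                    */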
1717 1717          /* l2_assoc is 0 for fully associative l2 cache */
1718 1718          if (l2_assoc)
1719 1719                  l2_colors = MAX(1, l2_sz / (l2_assoc * MMU_PAGESIZE));
1720 1720          else
1721 1721                  l2_colors = 1;
1722 1722  
1723 1723          ASSERT(ISP2(l2_colors));
1724 1724  
1725 1725          /* for scalability, configure at least PAGE_COLORS_MIN color bins */
1726 1726          page_colors = MAX(l2_colors, PAGE_COLORS_MIN);
1727 1727  
1728 1728          /*
1729 1729           * cpu_page_colors is non-zero when a page color may be spread across
1730 1730           * multiple bins.
1731 1731           */
1732 1732          if (l2_colors < page_colors)
1733 1733                  cpu_page_colors = l2_colors;
1734 1734  
1735 1735          ASSERT(ISP2(page_colors));
1736 1736  
1737 1737          page_colors_mask = page_colors - 1;
1738 1738  
1739 1739          ASSERT(ISP2(CPUSETSIZE()));
1740 1740          page_coloring_shift = lowbit(CPUSETSIZE());
1741 1741  
1742 1742          /* initialize number of colors per page size */
1743 1743          for (i = 0; i <= mmu.max_page_level; i++) {
1744 1744                  hw_page_array[i].hp_size = LEVEL_SIZE(i);
1745 1745                  hw_page_array[i].hp_shift = LEVEL_SHIFT(i);
1746 1746                  hw_page_array[i].hp_pgcnt = LEVEL_SIZE(i) >> LEVEL_SHIFT(0);
1747 1747                  hw_page_array[i].hp_colors = (page_colors_mask >>
1748 1748                      (hw_page_array[i].hp_shift - hw_page_array[0].hp_shift))
1749 1749                      + 1;
1750 1750                  colorequivszc[i] = 0;
1751 1751          }
1752 1752  
1753 1753          /*
1754 1754           * The value of cpu_page_colors determines if additional color bins
1755 1755           * need to be checked for a particular color in the page_get routines.
1756 1756           */
1757 1757          if (cpu_page_colors != 0) {
1758 1758  
1759 1759                  int a = lowbit(page_colors) - lowbit(cpu_page_colors);
1760 1760                  ASSERT(a > 0);
1761 1761                  ASSERT(a < 16);
1762 1762  
1763 1763                  for (i = 0; i <= mmu.max_page_level; i++) {
1764 1764                          if ((colors = hw_page_array[i].hp_colors) <= 1) {
1765 1765                                  colorequivszc[i] = 0;
1766 1766                                  continue;
1767 1767                          }
1768 1768                          while ((colors >> a) == 0)
1769 1769                                  a--;
1770 1770                          ASSERT(a >= 0);
1771 1771  
1772 1772                          /* higher 4 bits encodes color equiv mask */
1773 1773                          colorequivszc[i] = (a << 4);
1774 1774                  }
1775 1775          }
1776 1776  
1777 1777          /* factor in colorequiv to check additional 'equivalent' bins. */
1778 1778          if (colorequiv > 1) {
1779 1779  
1780 1780                  int a = lowbit(colorequiv) - 1;
1781 1781                  if (a > 15)
1782 1782                          a = 15;
1783 1783  
1784 1784                  for (i = 0; i <= mmu.max_page_level; i++) {
1785 1785                          if ((colors = hw_page_array[i].hp_colors) <= 1) {
1786 1786                                  continue;
1787 1787                          }
1788 1788                          while ((colors >> a) == 0)
1789 1789                                  a--;
1790 1790                          if ((a << 4) > colorequivszc[i]) {
1791 1791                                  colorequivszc[i] = (a << 4);
1792 1792                          }
1793 1793                  }
1794 1794          }
1795 1795  
1796 1796          /* size for mnoderanges */
1797 1797          for (mnoderangecnt = 0, i = 0; i < max_mem_nodes; i++)
1798 1798                  mnoderangecnt += mnode_range_cnt(i);
1799 1799          if (plat_dr_support_memory()) {
1800 1800                  /*
1801 1801                   * Reserve enough space for memory DR operations.
1802 1802                   * Two extra mnoderanges for possible fragmentation,
1803 1803                   * one for the 2G boundary and the other for the 4G boundary.
1804 1804                   * We don't expect a memory board crossing the 16M boundary
1805 1805                   * for memory hot-add operations on x86 platforms.
1806 1806                   */
1807 1807                  mnoderangecnt += 2 + max_mem_nodes - lgrp_plat_node_cnt;
1808 1808          }
1809 1809          colorsz = mnoderangecnt * sizeof (mnoderange_t);
1810 1810  
1811 1811          /* size for fpc_mutex and cpc_mutex */
1812 1812          colorsz += (2 * max_mem_nodes * sizeof (kmutex_t) * NPC_MUTEX);
1813 1813  
1814 1814          /* size of page_freelists */
1815 1815          colorsz += mnoderangecnt * sizeof (page_t ***);
1816 1816          colorsz += mnoderangecnt * mmu_page_sizes * sizeof (page_t **);
1817 1817  
1818 1818          for (i = 0; i < mmu_page_sizes; i++) {
1819 1819                  colors = page_get_pagecolors(i);
1820 1820                  colorsz += mnoderangecnt * colors * sizeof (page_t *);
1821 1821          }
1822 1822  
1823 1823          /* size of page_cachelists */
1824 1824          colorsz += mnoderangecnt * sizeof (page_t **);
1825 1825          colorsz += mnoderangecnt * page_colors * sizeof (page_t *);
1826 1826  
1827 1827          return (colorsz);
1828 1828  }
1829 1829  
1830 1830  /*
1831 1831   * Called once at startup to configure page_coloring data structures and
1832 1832   * does the 1st page_free()/page_freelist_add().
1833 1833   */
1834 1834  void
1835 1835  page_coloring_setup(caddr_t pcmemaddr)
1836 1836  {
1837 1837          int     i;
1838 1838          int     j;
1839 1839          int     k;
1840 1840          caddr_t addr;
1841 1841          int     colors;
1842 1842  
1843 1843          /*
1844 1844           * do page coloring setup
1845 1845           */
1846 1846          addr = pcmemaddr;
1847 1847  
1848 1848          mnoderanges = (mnoderange_t *)addr;
1849 1849          addr += (mnoderangecnt * sizeof (mnoderange_t));
1850 1850  
1851 1851          mnode_range_setup(mnoderanges);
1852 1852  
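                   /*
                    * pfn 0xfffff is the last 4K page below 4g, so this
                    * selects the mtype covering the top of the 0-4g range.
                    */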
1853 1853          if (physmax4g)
1854 1854                  mtype4g = pfn_2_mtype(0xfffff);
1855 1855  
1856 1856          for (k = 0; k < NPC_MUTEX; k++) {
1857 1857                  fpc_mutex[k] = (kmutex_t *)addr;
1858 1858                  addr += (max_mem_nodes * sizeof (kmutex_t));
1859 1859          }
1860 1860          for (k = 0; k < NPC_MUTEX; k++) {
1861 1861                  cpc_mutex[k] = (kmutex_t *)addr;
1862 1862                  addr += (max_mem_nodes * sizeof (kmutex_t));
1863 1863          }
1864 1864          page_freelists = (page_t ****)addr;
1865 1865          addr += (mnoderangecnt * sizeof (page_t ***));
1866 1866  
1867 1867          page_cachelists = (page_t ***)addr;
1868 1868          addr += (mnoderangecnt * sizeof (page_t **));
1869 1869  
1870 1870          for (i = 0; i < mnoderangecnt; i++) {
1871 1871                  page_freelists[i] = (page_t ***)addr;
1872 1872                  addr += (mmu_page_sizes * sizeof (page_t **));
1873 1873  
1874 1874                  for (j = 0; j < mmu_page_sizes; j++) {
1875 1875                          colors = page_get_pagecolors(j);
1876 1876                          page_freelists[i][j] = (page_t **)addr;
1877 1877                          addr += (colors * sizeof (page_t *));
1878 1878                  }
1879 1879                  page_cachelists[i] = (page_t **)addr;
1880 1880                  addr += (page_colors * sizeof (page_t *));
1881 1881          }
1882 1882  }
1883 1883  
1884 1884  #if defined(__xpv)
1885 1885  /*
1886 1886   * Give back 10% of the io_pool pages to the free list.
1887 1887   * Don't shrink the pool below some absolute minimum.
1888 1888   */
1889 1889  static void
1890 1890  page_io_pool_shrink()
1891 1891  {
1892 1892          int retcnt;
1893 1893          page_t *pp, *pp_first, *pp_last, **curpool;
1894 1894          mfn_t mfn;
1895 1895          int bothpools = 0;
1896 1896  
1897 1897          mutex_enter(&io_pool_lock);
1898 1898          io_pool_shrink_attempts++;      /* should be a kstat? */
1899 1899          retcnt = io_pool_cnt / 10;
1900 1900          if (io_pool_cnt - retcnt < io_pool_cnt_min)
1901 1901                  retcnt = io_pool_cnt - io_pool_cnt_min;
1902 1902          if (retcnt <= 0)
1903 1903                  goto done;
1904 1904          io_pool_shrinks++;      /* should be a kstat? */
1905 1905          curpool = &io_pool_4g;
1906 1906  domore:
1907 1907          /*
1908 1908           * Loop through taking pages from the end of the list
1909 1909           * (highest mfns) till the amount to return is reached.
1910 1910           */
1911 1911          for (pp = *curpool; pp && retcnt > 0; ) {
1912 1912                  pp_first = pp_last = pp->p_prev;
1913 1913                  if (pp_first == *curpool)
1914 1914                          break;
1915 1915                  retcnt--;
1916 1916                  io_pool_cnt--;
1917 1917                  page_io_pool_sub(curpool, pp_first, pp_last);
1918 1918                  if ((mfn = pfn_to_mfn(pp->p_pagenum)) < start_mfn)
1919 1919                          start_mfn = mfn;
1920 1920                  page_free(pp_first, 1);
1921 1921                  pp = *curpool;
1922 1922          }
1923 1923          if (retcnt != 0 && !bothpools) {
1924 1924                  /*
1925 1925                   * If not enough was found in the less constrained pool,
1926 1926                   * try the more constrained one.
1927 1927                   */
1928 1928                  curpool = &io_pool_16m;
1929 1929                  bothpools = 1;
1930 1930                  goto domore;
1931 1931          }
1932 1932  done:
1933 1933          mutex_exit(&io_pool_lock);
1934 1934  }
1935 1935  
1936 1936  #endif  /* __xpv */
1937 1937  
1938 1938  uint_t
1939 1939  page_create_update_flags_x86(uint_t flags)
1940 1940  {
1941 1941  #if defined(__xpv)
1942 1942          /*
1943 1943           * Check if this is an urgent allocation and free pages are depleted.
1944 1944           */
1945 1945          if (!(flags & PG_WAIT) && freemem < desfree)
1946 1946                  page_io_pool_shrink();
1947 1947  #else /* !__xpv */
1948 1948          /*
1949 1949           * page_create_get_something may call this because 4g memory may be
1950 1950           * depleted. Set flags to allow for relocation of base page below
1951 1951           * 4g if necessary.
1952 1952           */
1953 1953          if (physmax4g)
1954 1954                  flags |= (PGI_PGCPSZC0 | PGI_PGCPHIPRI);
1955 1955  #endif /* __xpv */
1956 1956          return (flags);
1957 1957  }
1958 1958  
1959 1959  /*ARGSUSED*/
1960 1960  int
1961 1961  bp_color(struct buf *bp)
1962 1962  {
1963 1963          return (0);
1964 1964  }
1965 1965  
1966 1966  #if defined(__xpv)
1967 1967  
1968 1968  /*
1969 1969   * Take pages out of an io_pool
1970 1970   */
1971 1971  static void
1972 1972  page_io_pool_sub(page_t **poolp, page_t *pp_first, page_t *pp_last)
1973 1973  {
1974 1974          if (*poolp == pp_first) {
1975 1975                  *poolp = pp_last->p_next;
1976 1976                  if (*poolp == pp_first)
1977 1977                          *poolp = NULL;
1978 1978          }
1979 1979          pp_first->p_prev->p_next = pp_last->p_next;
1980 1980          pp_last->p_next->p_prev = pp_first->p_prev;
1981 1981          pp_first->p_prev = pp_last;
1982 1982          pp_last->p_next = pp_first;
1983 1983  }
1984 1984  
1985 1985  /*
1986 1986   * Put a page on the io_pool list. The list is ordered by increasing MFN.
1987 1987   */
1988 1988  static void
1989 1989  page_io_pool_add(page_t **poolp, page_t *pp)
1990 1990  {
1991 1991          page_t  *look;
1992 1992          mfn_t   mfn = mfn_list[pp->p_pagenum];
1993 1993  
1994 1994          if (*poolp == NULL) {
1995 1995                  *poolp = pp;
1996 1996                  pp->p_next = pp;
1997 1997                  pp->p_prev = pp;
1998 1998                  return;
1999 1999          }
2000 2000  
2001 2001          /*
2002 2002           * Since we try to take pages from the high end of the pool
2003 2003           * chances are good that the pages to be put on the list will
2004 2004           * go at or near the end of the list, so start at the end and
2005 2005           * work backwards.
2006 2006           */
2007 2007          look = (*poolp)->p_prev;
2008 2008          while (mfn < mfn_list[look->p_pagenum]) {
2009 2009                  look = look->p_prev;
2010 2010                  if (look == (*poolp)->p_prev)
2011 2011                          break; /* backed all the way to front of list */
2012 2012          }
2013 2013  
2014 2014          /* insert after look */
2015 2015          pp->p_prev = look;
2016 2016          pp->p_next = look->p_next;
2017 2017          pp->p_next->p_prev = pp;
2018 2018          look->p_next = pp;
2019 2019          if (mfn < mfn_list[(*poolp)->p_pagenum]) {
2020 2020                  /*
2021 2021                   * We inserted a new first list element; adjust the
2022 2022                   * pool pointer to the newly inserted element.
2023 2023                   */
2024 2024                  *poolp = pp;
2025 2025          }
2026 2026  }
2027 2027  
2028 2028  /*
2029 2029   * Add a page to the io_pool.  Setting the force flag will force the page
2030 2030   * into the io_pool no matter what.
2031 2031   */
2032 2032  static void
2033 2033  add_page_to_pool(page_t *pp, int force)
2034 2034  {
2035 2035          page_t *highest;
2036 2036          page_t *freep = NULL;
2037 2037  
2038 2038          mutex_enter(&io_pool_lock);
2039 2039          /*
2040 2040           * Always keep the scarce low memory pages
2041 2041           */
2042 2042          if (mfn_list[pp->p_pagenum] < PFN_16MEG) {
2043 2043                  ++io_pool_cnt;
2044 2044                  page_io_pool_add(&io_pool_16m, pp);
2045 2045                  goto done;
2046 2046          }
2047 2047          if (io_pool_cnt < io_pool_cnt_max || force || io_pool_4g == NULL) {
2048 2048                  ++io_pool_cnt;
2049 2049                  page_io_pool_add(&io_pool_4g, pp);
2050 2050          } else {
2051 2051                  highest = io_pool_4g->p_prev;
2052 2052                  if (mfn_list[pp->p_pagenum] < mfn_list[highest->p_pagenum]) {
2053 2053                          page_io_pool_sub(&io_pool_4g, highest, highest);
2054 2054                          page_io_pool_add(&io_pool_4g, pp);
2055 2055                          freep = highest;
2056 2056                  } else {
2057 2057                          freep = pp;
2058 2058                  }
2059 2059          }
2060 2060  done:
2061 2061          mutex_exit(&io_pool_lock);
2062 2062          if (freep)
2063 2063                  page_free(freep, 1);
2064 2064  }
2065 2065  
2066 2066  
2067 2067  int contig_pfn_cnt;     /* no of pfns in the contig pfn list */
2068 2068  int contig_pfn_max;     /* capacity of the contig pfn list */
2069 2069  int next_alloc_pfn;     /* next position in list to start a contig search */
2070 2070  int contig_pfnlist_updates;     /* pfn list update count */
2071 2071  int contig_pfnlist_builds;      /* how many times have we (re)built list */
2072 2072  int contig_pfnlist_buildfailed; /* how many times has list build failed */
2073 2073  int create_contig_pending;      /* nonzero means taskq creating contig list */
2074 2074  pfn_t *contig_pfn_list = NULL;  /* list of contig pfns in ascending mfn order */
2075 2075  
2076 2076  /*
2077 2077   * Function to use in sorting a list of pfns by their underlying mfns.
2078 2078   */
2079 2079  static int
2080 2080  mfn_compare(const void *pfnp1, const void *pfnp2)
2081 2081  {
2082 2082          mfn_t mfn1 = mfn_list[*(pfn_t *)pfnp1];
2083 2083          mfn_t mfn2 = mfn_list[*(pfn_t *)pfnp2];
2084 2084  
2085 2085          if (mfn1 > mfn2)
2086 2086                  return (1);
2087 2087          if (mfn1 < mfn2)
2088 2088                  return (-1);
2089 2089          return (0);
2090 2090  }
2091 2091  
2092 2092  /*
2093 2093   * Compact the contig_pfn_list by tossing all the non-contiguous
2094 2094   * elements from the list.
2095 2095   */
2096 2096  static void
2097 2097  compact_contig_pfn_list(void)
2098 2098  {
2099 2099          pfn_t pfn, lapfn, prev_lapfn;
2100 2100          mfn_t mfn;
2101 2101          int i, newcnt = 0;
2102 2102  
2103 2103          prev_lapfn = 0;
2104 2104          for (i = 0; i < contig_pfn_cnt - 1; i++) {
2105 2105                  pfn = contig_pfn_list[i];
2106 2106                  lapfn = contig_pfn_list[i + 1];
2107 2107                  mfn = mfn_list[pfn];
2108 2108                  /*
2109 2109                   * See if next pfn is for a contig mfn
2110 2110                   */
2111 2111                  if (mfn_list[lapfn] != mfn + 1)
2112 2112                          continue;
2113 2113                  /*
2114 2114                   * pfn and lookahead are both put in list
2115 2115                   * unless pfn is the previous lookahead.
2116 2116                   */
2117 2117                  if (pfn != prev_lapfn)
2118 2118                          contig_pfn_list[newcnt++] = pfn;
2119 2119                  contig_pfn_list[newcnt++] = lapfn;
2120 2120                  prev_lapfn = lapfn;
2121 2121          }
2122 2122          for (i = newcnt; i < contig_pfn_cnt; i++)
2123 2123                  contig_pfn_list[i] = 0;
2124 2124          contig_pfn_cnt = newcnt;
2125 2125  }
2126 2126  
2127 2127  /*ARGSUSED*/
2128 2128  static void
2129 2129  call_create_contiglist(void *arg)
2130 2130  {
2131 2131          (void) create_contig_pfnlist(PG_WAIT);
2132 2132  }
2133 2133  
2134 2134  /*
2135 2135   * Create list of freelist pfns that have underlying
2136 2136   * contiguous mfns.  The list is kept in ascending mfn order.
2137 2137   * Returns 1 if the list was created, else 0.
2138 2138   */
2139 2139  static int
2140 2140  create_contig_pfnlist(uint_t flags)
2141 2141  {
2142 2142          pfn_t pfn;
2143 2143          page_t *pp;
2144 2144          int ret = 1;
2145 2145  
2146 2146          mutex_enter(&contig_list_lock);
2147 2147          if (contig_pfn_list != NULL)
2148 2148                  goto out;
2149 2149          contig_pfn_max = freemem + (freemem / 10);
2150 2150          contig_pfn_list = kmem_zalloc(contig_pfn_max * sizeof (pfn_t),
2151 2151              (flags & PG_WAIT) ? KM_SLEEP : KM_NOSLEEP);
2152 2152          if (contig_pfn_list == NULL) {
2153 2153                  /*
2154 2154                   * If we could not create the contig list (because we
2155 2155                   * could not sleep for memory), dispatch a taskq that can
2156 2156                   * sleep to get the memory.
2157 2157                   */
2158 2158                  if (!create_contig_pending) {
2159 2159                          if (taskq_dispatch(system_taskq, call_create_contiglist,
2160 2160                              NULL, TQ_NOSLEEP) != NULL)
2161 2161                                  create_contig_pending = 1;
2162 2162                  }
2163 2163                  contig_pfnlist_buildfailed++;   /* count list build failures */
2164 2164                  ret = 0;
2165 2165                  goto out;
2166 2166          }
2167 2167          create_contig_pending = 0;
2168 2168          ASSERT(contig_pfn_cnt == 0);
2169 2169          for (pfn = 0; pfn < mfn_count; pfn++) {
2170 2170                  pp = page_numtopp_nolock(pfn);
2171 2171                  if (pp == NULL || !PP_ISFREE(pp))
2172 2172                          continue;
2173 2173                  contig_pfn_list[contig_pfn_cnt] = pfn;
2174 2174                  if (++contig_pfn_cnt == contig_pfn_max)
2175 2175                          break;
2176 2176          }
2177 2177          /*
2178 2178           * Sanity check the new list.
2179 2179           */
2180 2180          if (contig_pfn_cnt < 2) { /* no contig pfns */
2181 2181                  contig_pfn_cnt = 0;
2182 2182                  contig_pfnlist_buildfailed++;
2183 2183                  kmem_free(contig_pfn_list, contig_pfn_max * sizeof (pfn_t));
2184 2184                  contig_pfn_list = NULL;
2185 2185                  contig_pfn_max = 0;
2186 2186                  ret = 0;
2187 2187                  goto out;
2188 2188          }
2189 2189          qsort(contig_pfn_list, contig_pfn_cnt, sizeof (pfn_t), mfn_compare);
2190 2190          compact_contig_pfn_list();
2191 2191          /*
2192 2192           * Make sure next search of the newly created contiguous pfn
2193 2193           * list starts at the beginning of the list.
2194 2194           */
2195 2195          next_alloc_pfn = 0;
2196 2196          contig_pfnlist_builds++;        /* count list builds */
2197 2197  out:
2198 2198          mutex_exit(&contig_list_lock);
2199 2199          return (ret);
2200 2200  }
2201 2201  
2202 2202  
2203 2203  /*
2204 2204   * Toss the current contig pfnlist.  Someone is about to do a massive
2205 2205   * update to pfn<->mfn mappings.  So we have them destroy the list and lock
2206 2206   * it till they are done with their update.
2207 2207   */
2208 2208  void
2209 2209  clear_and_lock_contig_pfnlist()
2210 2210  {
2211 2211          pfn_t *listp = NULL;
2212 2212          size_t listsize;
2213 2213  
2214 2214          mutex_enter(&contig_list_lock);
2215 2215          if (contig_pfn_list != NULL) {
2216 2216                  listp = contig_pfn_list;
2217 2217                  listsize = contig_pfn_max * sizeof (pfn_t);
2218 2218                  contig_pfn_list = NULL;
2219 2219                  contig_pfn_max = contig_pfn_cnt = 0;
2220 2220          }
2221 2221          if (listp != NULL)
2222 2222                  kmem_free(listp, listsize);
2223 2223  }
2224 2224  
2225 2225  /*
2226 2226   * Unlock the contig_pfn_list.  The next attempted use of it will cause
2227 2227   * it to be re-created.
2228 2228   */
2229 2229  void
2230 2230  unlock_contig_pfnlist()
2231 2231  {
2232 2232          mutex_exit(&contig_list_lock);
2233 2233  }
2234 2234  
2235 2235  /*
2236 2236   * Update the contiguous pfn list in response to a pfn <-> mfn reassignment
2237 2237   */
2238 2238  void
2239 2239  update_contig_pfnlist(pfn_t pfn, mfn_t oldmfn, mfn_t newmfn)
2240 2240  {
2241 2241          int probe_hi, probe_lo, probe_pos, insert_after, insert_point;
2242 2242          pfn_t probe_pfn;
2243 2243          mfn_t probe_mfn;
2244 2244          int drop_lock = 0;
2245 2245  
2246 2246          if (mutex_owner(&contig_list_lock) != curthread) {
2247 2247                  drop_lock = 1;
2248 2248                  mutex_enter(&contig_list_lock);
2249 2249          }
2250 2250          if (contig_pfn_list == NULL)
2251 2251                  goto done;
2252 2252          contig_pfnlist_updates++;
2253 2253          /*
2254 2254           * Find the pfn in the current list.  Use a binary chop to locate it.
2255 2255           */
2256 2256          probe_hi = contig_pfn_cnt - 1;
2257 2257          probe_lo = 0;
2258 2258          probe_pos = (probe_hi + probe_lo) / 2;
2259 2259          while ((probe_pfn = contig_pfn_list[probe_pos]) != pfn) {
2260 2260                  if (probe_pos == probe_lo) { /* pfn not in list */
2261 2261                          probe_pos = -1;
2262 2262                          break;
2263 2263                  }
2264 2264                  if (pfn_to_mfn(probe_pfn) <= oldmfn)
2265 2265                          probe_lo = probe_pos;
2266 2266                  else
2267 2267                          probe_hi = probe_pos;
2268 2268                  probe_pos = (probe_hi + probe_lo) / 2;
2269 2269          }
2270 2270          if (probe_pos >= 0) {
2271 2271                  /*
2272 2272                   * Remove pfn from list and ensure next alloc
2273 2273                   * position stays in bounds.
2274 2274                   */
2275 2275                  if (--contig_pfn_cnt <= next_alloc_pfn)
2276 2276                          next_alloc_pfn = 0;
2277 2277                  if (contig_pfn_cnt < 2) { /* no contig pfns */
2278 2278                          contig_pfn_cnt = 0;
2279 2279                          kmem_free(contig_pfn_list,
2280 2280                              contig_pfn_max * sizeof (pfn_t));
2281 2281                          contig_pfn_list = NULL;
2282 2282                          contig_pfn_max = 0;
2283 2283                          goto done;
2284 2284                  }
2285 2285                  ovbcopy(&contig_pfn_list[probe_pos + 1],
2286 2286                      &contig_pfn_list[probe_pos],
2287 2287                      (contig_pfn_cnt - probe_pos) * sizeof (pfn_t));
2288 2288          }
2289 2289          if (newmfn == MFN_INVALID)
2290 2290                  goto done;
2291 2291          /*
2292 2292           * Check if new mfn has adjacent mfns in the list
2293 2293           */
2294 2294          probe_hi = contig_pfn_cnt - 1;
2295 2295          probe_lo = 0;
2296 2296          insert_after = -2;
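                   /*
                    * Binary search for an entry whose mfn is adjacent to
                    * newmfn; insert_after stays -2 if no adjacent entry
                    * exists.
                    */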
2297 2297          do {
2298 2298                  probe_pos = (probe_hi + probe_lo) / 2;
2299 2299                  probe_mfn = pfn_to_mfn(contig_pfn_list[probe_pos]);
2300 2300                  if (newmfn == probe_mfn + 1)
2301 2301                          insert_after = probe_pos;
2302 2302                  else if (newmfn == probe_mfn - 1)
2303 2303                          insert_after = probe_pos - 1;
2304 2304                  if (probe_pos == probe_lo)
2305 2305                          break;
2306 2306                  if (probe_mfn <= newmfn)
2307 2307                          probe_lo = probe_pos;
2308 2308                  else
2309 2309                          probe_hi = probe_pos;
2310 2310          } while (insert_after == -2);
2311 2311          /*
2312 2312           * If there is space in the list and there are adjacent mfns,
2313 2313           * insert the pfn into its proper place in the list.
2314 2314           */
2315 2315          if (insert_after != -2 && contig_pfn_cnt + 1 <= contig_pfn_max) {
2316 2316                  insert_point = insert_after + 1;
2317 2317                  ovbcopy(&contig_pfn_list[insert_point],
2318 2318                      &contig_pfn_list[insert_point + 1],
2319 2319                      (contig_pfn_cnt - insert_point) * sizeof (pfn_t));
2320 2320                  contig_pfn_list[insert_point] = pfn;
2321 2321                  contig_pfn_cnt++;
2322 2322          }
2323 2323  done:
2324 2324          if (drop_lock)
2325 2325                  mutex_exit(&contig_list_lock);
2326 2326  }
2327 2327  
2328 2328  /*
2329 2329   * Called to (re-)populate the io_pool from the free page lists.
2330 2330   */
2331 2331  long
2332 2332  populate_io_pool(void)
2333 2333  {
2334 2334          pfn_t pfn;
2335 2335          mfn_t mfn, max_mfn;
2336 2336          page_t *pp;
2337 2337  
2338 2338          /*
2339 2339           * Figure out the bounds of the pool on first invocation.
2340 2340           * We use a percentage of memory for the io pool size.
2341 2341           * We allow that to shrink, but not below a fixed minimum.
2342 2342           */
2343 2343          if (io_pool_cnt_max == 0) {
2344 2344                  io_pool_cnt_max = physmem / (100 / io_pool_physmem_pct);
2345 2345                  io_pool_cnt_lowater = io_pool_cnt_max;
2346 2346                  /*
2347 2347                   * This is the first time in populate_io_pool, grab a va to use
2348 2348                   * when we need to allocate pages.
2349 2349                   */
2350 2350                  io_pool_kva = vmem_alloc(heap_arena, PAGESIZE, VM_SLEEP);
2351 2351          }
2352 2352          /*
2353 2353           * If we are out of pages in the pool, then grow the size of the pool
2354 2354           */
2355 2355          if (io_pool_cnt == 0) {
2356 2356                  /*
2357 2357                   * Grow the max size of the io pool by 5%, but never more than
2358 2358                   * 25% of physical memory.
2359 2359                   */
2360 2360                  if (io_pool_cnt_max < physmem / 4)
2361 2361                          io_pool_cnt_max += io_pool_cnt_max / 20;
2362 2362          }
2363 2363          io_pool_grows++;        /* should be a kstat? */
2364 2364  
2365 2365          /*
2366 2366           * Get highest mfn on this platform, but limit to the 32 bit DMA max.
2367 2367           */
2368 2368          (void) mfn_to_pfn(start_mfn);
2369 2369          max_mfn = MIN(cached_max_mfn, PFN_4GIG);
2370 2370          for (mfn = start_mfn; mfn < max_mfn; start_mfn = ++mfn) {
2371 2371                  pfn = mfn_to_pfn(mfn);
2372 2372                  if (pfn & PFN_IS_FOREIGN_MFN)
2373 2373                          continue;
2374 2374                  /*
2375 2375                   * try to allocate it from free pages
2376 2376                   */
2377 2377                  pp = page_numtopp_alloc(pfn);
2378 2378                  if (pp == NULL)
2379 2379                          continue;
2380 2380                  PP_CLRFREE(pp);
2381 2381                  add_page_to_pool(pp, 1);
2382 2382                  if (io_pool_cnt >= io_pool_cnt_max)
2383 2383                          break;
2384 2384          }
2385 2385  
2386 2386          return (io_pool_cnt);
2387 2387  }
2388 2388  
2389 2389  /*
2390 2390   * Destroy a page that was being used for DMA I/O. It may or
2391 2391   * may not actually go back to the io_pool.
2392 2392   */
2393 2393  void
2394 2394  page_destroy_io(page_t *pp)
2395 2395  {
2396 2396          mfn_t mfn = mfn_list[pp->p_pagenum];
2397 2397  
2398 2398          /*
2399 2399           * When the page was alloc'd a reservation was made, release it now
2400 2400           */
2401 2401          page_unresv(1);
2402 2402          /*
2403 2403           * Unload translations, if any, then hash out the
2404 2404           * page to erase its identity.
2405 2405           */
2406 2406          (void) hat_pageunload(pp, HAT_FORCE_PGUNLOAD);
2407 2407          page_hashout(pp, NULL);
2408 2408  
2409 2409          /*
2410 2410           * If the page came from the free lists, just put it back to them.
2411 2411           * DomU pages always go on the free lists as well.
2412 2412           */
2413 2413          if (!DOMAIN_IS_INITDOMAIN(xen_info) || mfn >= PFN_4GIG) {
2414 2414                  page_free(pp, 1);
2415 2415                  return;
2416 2416          }
2417 2417  
2418 2418          add_page_to_pool(pp, 0);
2419 2419  }
2420 2420  
2421 2421  
2422 2422  long contig_searches;           /* count of times contig pages requested */
2423 2423  long contig_search_restarts;    /* count of contig ranges tried */
2424 2424  long contig_search_failed;      /* count of contig alloc failures */
2425 2425  
2426 2426  /*
2427 2427   * Free partial page list
2428 2428   */
2429 2429  static void
2430 2430  free_partial_list(page_t **pplist)
2431 2431  {
2432 2432          page_t *pp;
2433 2433  
2434 2434          while (*pplist != NULL) {
2435 2435                  pp = *pplist;
2436 2436                  page_io_pool_sub(pplist, pp, pp);
2437 2437                  page_free(pp, 1);
2438 2438          }
2439 2439  }
2440 2440  
2441 2441  /*
2442 2442   * Look thru the contiguous pfns that are not part of the io_pool for
2443 2443   * contiguous free pages.  Return a list of the found pages or NULL.
2444 2444   */
2445 2445  page_t *
2446 2446  find_contig_free(uint_t npages, uint_t flags, uint64_t pfnseg,
2447 2447      pgcnt_t pfnalign)
2448 2448  {
2449 2449          page_t *pp, *plist = NULL;
2450 2450          mfn_t mfn, prev_mfn, start_mfn;
2451 2451          pfn_t pfn;
2452 2452          int pages_needed, pages_requested;
2453 2453          int search_start;
2454 2454  
2455 2455          /*
2456 2456           * create the contig pfn list if not already done
2457 2457           */
2458 2458  retry:
2459 2459          mutex_enter(&contig_list_lock);
2460 2460          if (contig_pfn_list == NULL) {
2461 2461                  mutex_exit(&contig_list_lock);
2462 2462                  if (!create_contig_pfnlist(flags)) {
2463 2463                          return (NULL);
2464 2464                  }
2465 2465                  goto retry;
2466 2466          }
2467 2467          contig_searches++;
2468 2468          /*
2469 2469           * Search contiguous pfn list for physically contiguous pages not in
2470 2470           * the io_pool.  Start the search where the last search left off.
2471 2471           */
2472 2472          pages_requested = pages_needed = npages;
2473 2473          search_start = next_alloc_pfn;
2474 2474          start_mfn = prev_mfn = 0;
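                   /*
                    * The search is circular: it wraps from the end of the
                    * list back to the start and gives up once it returns
                    * to search_start.
                    */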
2475 2475          while (pages_needed) {
2476 2476                  pfn = contig_pfn_list[next_alloc_pfn];
2477 2477                  mfn = pfn_to_mfn(pfn);
2478 2478                  /*
2479 2479                   * Check that mfn is the first one or contiguous with the
2480 2480                   * previous one, that the corresponding page is free, and
2481 2481                   * that the mfn range does not cross a segment boundary.
2482 2482                   */
2483 2483                  if ((prev_mfn == 0 || mfn == prev_mfn + 1) &&
2484 2484                      (pp = page_numtopp_alloc(pfn)) != NULL &&
2485 2485                      !((mfn & pfnseg) < (start_mfn & pfnseg))) {
2486 2486                          PP_CLRFREE(pp);
2487 2487                          page_io_pool_add(&plist, pp);
2488 2488                          pages_needed--;
2489 2489                          if (prev_mfn == 0) {
2490 2490                                  if (pfnalign &&
2491 2491                                      mfn != P2ROUNDUP(mfn, pfnalign)) {
2492 2492                                          /*
2493 2493                                           * not properly aligned
2494 2494                                           */
2495 2495                                          contig_search_restarts++;
2496 2496                                          free_partial_list(&plist);
2497 2497                                          pages_needed = pages_requested;
2498 2498                                          start_mfn = prev_mfn = 0;
2499 2499                                          goto skip;
2500 2500                                  }
2501 2501                                  start_mfn = mfn;
2502 2502                          }
2503 2503                          prev_mfn = mfn;
2504 2504                  } else {
2505 2505                          contig_search_restarts++;
2506 2506                          free_partial_list(&plist);
2507 2507                          pages_needed = pages_requested;
2508 2508                          start_mfn = prev_mfn = 0;
2509 2509                  }
2510 2510  skip:
2511 2511                  if (++next_alloc_pfn == contig_pfn_cnt)
2512 2512                          next_alloc_pfn = 0;
2513 2513                  if (next_alloc_pfn == search_start)
2514 2514                          break; /* all pfns searched */
2515 2515          }
2516 2516          mutex_exit(&contig_list_lock);
2517 2517          if (pages_needed) {
2518 2518                  contig_search_failed++;
2519 2519                  /*
2520 2520                   * Failed to find enough contig pages.
2521 2521                   * free partial page list
2522 2522                   */
2523 2523                  free_partial_list(&plist);
2524 2524          }
2525 2525          return (plist);
2526 2526  }
2527 2527  
2528 2528  /*
2529 2529   * Search the reserved io pool pages for a page range with the
2530 2530   * desired characteristics.
2531 2531   */
2532 2532  page_t *
2533 2533  page_io_pool_alloc(ddi_dma_attr_t *mattr, int contig, pgcnt_t minctg)
2534 2534  {
2535 2535          page_t *pp_first, *pp_last;
2536 2536          page_t *pp, **poolp;
2537 2537          pgcnt_t nwanted, pfnalign;
2538 2538          uint64_t pfnseg;
2539 2539          mfn_t mfn, tmfn, hi_mfn, lo_mfn;
2540 2540          int align, attempt = 0;
2541 2541  
2542 2542          if (minctg == 1)
2543 2543                  contig = 0;
2544 2544          lo_mfn = mmu_btop(mattr->dma_attr_addr_lo);
2545 2545          hi_mfn = mmu_btop(mattr->dma_attr_addr_hi);
2546 2546          pfnseg = mmu_btop(mattr->dma_attr_seg);
2547 2547          align = maxbit(mattr->dma_attr_align, mattr->dma_attr_minxfer);
2548 2548          if (align > MMU_PAGESIZE)
2549 2549                  pfnalign = mmu_btop(align);
2550 2550          else
2551 2551                  pfnalign = 0;
2552 2552  
2553 2553  try_again:
2554 2554          /*
2555 2555           * See if we want pages for a legacy device
2556 2556           */
2557 2557          if (hi_mfn < PFN_16MEG)
2558 2558                  poolp = &io_pool_16m;
2559 2559          else
2560 2560                  poolp = &io_pool_4g;
2561 2561  try_smaller:
2562 2562          /*
2563 2563           * Take pages from I/O pool. We'll use pages from the highest
2564 2564           * MFN range possible.
2565 2565           */
2566 2566          pp_first = pp_last = NULL;
2567 2567          mutex_enter(&io_pool_lock);
2568 2568          nwanted = minctg;
2569 2569          for (pp = *poolp; pp && nwanted > 0; ) {
2570 2570                  pp = pp->p_prev;
2571 2571  
2572 2572                  /*
2573 2573                   * skip pages above allowable range
2574 2574                   */
2575 2575                  mfn = mfn_list[pp->p_pagenum];
2576 2576                  if (hi_mfn < mfn)
2577 2577                          goto skip;
2578 2578  
2579 2579                  /*
2580 2580                   * stop at pages below allowable range
2581 2581                   */
2582 2582                  if (lo_mfn > mfn)
2583 2583                          break;
2584 2584  restart:
2585 2585                  if (pp_last == NULL) {
2586 2586                          /*
2587 2587                           * Check alignment
2588 2588                           */
2589 2589                          tmfn = mfn - (minctg - 1);
2590 2590                          if (pfnalign && tmfn != P2ROUNDUP(tmfn, pfnalign))
2591 2591                                  goto skip; /* not properly aligned */
2592 2592                          /*
2593 2593                           * Check segment
2594 2594                           */
2595 2595                          if ((mfn & pfnseg) < (tmfn & pfnseg))
2596 2596                                  goto skip; /* crosses seg boundary */
2597 2597                          /*
2598 2598                           * Start building page list
2599 2599                           */
2600 2600                          pp_first = pp_last = pp;
2601 2601                          nwanted--;
2602 2602                  } else {
2603 2603                          /*
2604 2604                           * check physical contiguity if required
2605 2605                           */
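                                   /*
                                    * The pool is walked from high MFN to
                                    * low, so a contiguous run requires each
                                    * new page's MFN to be exactly one below
                                    * that of the current first page.
                                    */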
2606 2606                          if (contig &&
2607 2607                              mfn_list[pp_first->p_pagenum] != mfn + 1) {
2608 2608                                  /*
2609 2609                                   * not a contiguous page, restart list.
2610 2610                                   */
2611 2611                                  pp_last = NULL;
2612 2612                                  nwanted = minctg;
2613 2613                                  goto restart;
2614 2614                          } else { /* add page to list */
2615 2615                                  pp_first = pp;
2616 2616                                  nwanted--;
2617 2617                          }
2618 2618                  }
2619 2619  skip:
2620 2620                  if (pp == *poolp)
2621 2621                          break;
2622 2622          }
2623 2623  
2624 2624          /*
2625 2625           * If we didn't find memory, try the more constrained pool, then
2626 2626           * sweep free pages into the DMA pool and try again.
2627 2627           */
2628 2628          if (nwanted != 0) {
2629 2629                  mutex_exit(&io_pool_lock);
2630 2630                  /*
2631 2631                   * If we were looking in the less constrained pool and
2632 2632                   * didn't find pages, try the more constrained pool.
2633 2633                   */
2634 2634                  if (poolp == &io_pool_4g) {
2635 2635                          poolp = &io_pool_16m;
2636 2636                          goto try_smaller;
2637 2637                  }
2638 2638                  kmem_reap();
2639 2639                  if (++attempt < 4) {
2640 2640                          /*
2641 2641                           * Grab some more io_pool pages
2642 2642                           */
2643 2643                          (void) populate_io_pool();
2644 2644                          goto try_again; /* go around and retry */
2645 2645                  }
2646 2646                  return (NULL);
2647 2647          }
2648 2648          /*
2649 2649           * Found the pages, now snip them from the list
2650 2650           */
2651 2651          page_io_pool_sub(poolp, pp_first, pp_last);
2652 2652          io_pool_cnt -= minctg;
2653 2653          /*
2654 2654           * reset low water mark
2655 2655           */
2656 2656          if (io_pool_cnt < io_pool_cnt_lowater)
2657 2657                  io_pool_cnt_lowater = io_pool_cnt;
2658 2658          mutex_exit(&io_pool_lock);
2659 2659          return (pp_first);
2660 2660  }
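
The alignment and segment tests above are pure frame-number arithmetic, so they are easy to check in isolation. The following is a minimal user-level sketch, not kernel code, assuming hypothetical attribute values (16KB alignment and a 1MB DMA segment on 4KB pages) and borrowing the P2ROUNDUP() definition from sys/sysmacros.h:

        /*
         * Sketch of the checks applied to a candidate run [tmfn, mfn].
         * All values here are hypothetical.
         */
        #include <stdio.h>
        #include <stdint.h>

        #define P2ROUNDUP(x, align)     (-(-(x) & -(align)))

        int
        main(void)
        {
                uint64_t pfnalign = 4;          /* 16KB alignment / 4KB pages */
                uint64_t pfnseg = 0xff;         /* 1MB segment / 4KB pages */
                uint64_t minctg = 4;            /* pages wanted */
                uint64_t mfn = 0x1007;          /* last frame of candidate run */
                uint64_t tmfn = mfn - (minctg - 1);     /* first frame */

                if (pfnalign && tmfn != P2ROUNDUP(tmfn, pfnalign))
                        printf("rejected: run is not properly aligned\n");
                else if ((mfn & pfnseg) < (tmfn & pfnseg))
                        printf("rejected: run crosses a segment boundary\n");
                else
                        printf("accepted: frames 0x%llx..0x%llx\n",
                            (unsigned long long)tmfn, (unsigned long long)mfn);
                return (0);
        }

With these values the run 0x1004..0x1007 is both aligned and contained within one segment, so it prints "accepted".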
2661 2661  
2662 2662  page_t *
2663 2663  page_swap_with_hypervisor(struct vnode *vp, u_offset_t off, caddr_t vaddr,
2664 2664      ddi_dma_attr_t *mattr, uint_t flags, pgcnt_t minctg)
2665 2665  {
2666 2666          uint_t kflags;
2667 2667          int order, extra, extpages, i, contig, nbits, extents;
2668 2668          page_t *pp, *expp, *pp_first, **pplist = NULL;
2669 2669          mfn_t *mfnlist = NULL;
2670 2670  
2671 2671          contig = flags & PG_PHYSCONTIG;
2672 2672          if (minctg == 1)
2673 2673                  contig = 0;
2674 2674          flags &= ~PG_PHYSCONTIG;
2675 2675          kflags = flags & PG_WAIT ? KM_SLEEP : KM_NOSLEEP;
2676 2676          /*
2677 2677           * The hypervisor allocates extents; if we want contig
2678 2678           * pages, the extent must be >= minctg
2679 2679           */
2680 2680          if (contig) {
2681 2681                  order = highbit(minctg) - 1;
2682 2682                  if (minctg & ((1 << order) - 1))
2683 2683                          order++;
2684 2684                  extpages = 1 << order;
2685 2685          } else {
2686 2686                  order = 0;
2687 2687                  extpages = minctg;
2688 2688          }
2689 2689          if (extpages > minctg) {
2690 2690                  extra = extpages - minctg;
2691 2691                  if (!page_resv(extra, kflags))
2692 2692                          return (NULL);
2693 2693          }
2694 2694          pp_first = NULL;
2695 2695          pplist = kmem_alloc(extpages * sizeof (page_t *), kflags);
2696 2696          if (pplist == NULL)
2697 2697                  goto balloon_fail;
2698 2698          mfnlist = kmem_alloc(extpages * sizeof (mfn_t), kflags);
2699 2699          if (mfnlist == NULL)
2700 2700                  goto balloon_fail;
2701 2701          pp = page_create_va(vp, off, minctg * PAGESIZE, flags, &kvseg, vaddr);
2702 2702          if (pp == NULL)
2703 2703                  goto balloon_fail;
2704 2704          pp_first = pp;
2705 2705          if (extpages > minctg) {
2706 2706                  /*
2707 2707                   * fill out the rest of extent pages to swap
2708 2708                   * with the hypervisor
2709 2709                   */
2710 2710                  for (i = 0; i < extra; i++) {
2711 2711                          expp = page_create_va(vp,
2712 2712                              (u_offset_t)(uintptr_t)io_pool_kva,
2713 2713                              PAGESIZE, flags, &kvseg, io_pool_kva);
2714 2714                          if (expp == NULL)
2715 2715                                  goto balloon_fail;
2716 2716                          (void) hat_pageunload(expp, HAT_FORCE_PGUNLOAD);
2717 2717                          page_io_unlock(expp);
2718 2718                          page_hashout(expp, NULL);
2719 2719                          page_io_lock(expp);
2720 2720                          /*
2721 2721                           * add page to end of list
2722 2722                           */
2723 2723                          expp->p_prev = pp_first->p_prev;
2724 2724                          expp->p_next = pp_first;
2725 2725                          expp->p_prev->p_next = expp;
2726 2726                          pp_first->p_prev = expp;
2727 2727                  }
2728 2728  
2729 2729          }
2730 2730          for (i = 0; i < extpages; i++) {
2731 2731                  pplist[i] = pp;
2732 2732                  pp = pp->p_next;
2733 2733          }
2734 2734          nbits = highbit(mattr->dma_attr_addr_hi);
2735 2735          extents = contig ? 1 : minctg;
2736 2736          if (balloon_replace_pages(extents, pplist, nbits, order,
2737 2737              mfnlist) != extents) {
2738 2738                  if (ioalloc_dbg)
2739 2739                          cmn_err(CE_NOTE, "request to hypervisor"
2740 2740                              " for %d pages, maxaddr %" PRIx64 " failed",
2741 2741                              extpages, mattr->dma_attr_addr_hi);
2742 2742                  goto balloon_fail;
2743 2743          }
2744 2744  
2745 2745          kmem_free(pplist, extpages * sizeof (page_t *));
2746 2746          kmem_free(mfnlist, extpages * sizeof (mfn_t));
2747 2747          /*
2748 2748           * Return any excess pages to free list
2749 2749           */
2750 2750          if (extpages > minctg) {
2751 2751                  for (i = 0; i < extra; i++) {
2752 2752                          pp = pp_first->p_prev;
2753 2753                          page_sub(&pp_first, pp);
2754 2754                          page_io_unlock(pp);
2755 2755                          page_unresv(1);
2756 2756                          page_free(pp, 1);
2757 2757                  }
2758 2758          }
2759 2759          return (pp_first);
2760 2760  balloon_fail:
2761 2761          /*
2762 2762           * Return pages to free list and return failure
2763 2763           */
2764 2764          while (pp_first != NULL) {
2765 2765                  pp = pp_first;
2766 2766                  page_sub(&pp_first, pp);
2767 2767                  page_io_unlock(pp);
2768 2768                  if (pp->p_vnode != NULL)
2769 2769                          page_hashout(pp, NULL);
2770 2770                  page_free(pp, 1);
2771 2771          }
2772 2772          if (pplist)
2773 2773                  kmem_free(pplist, extpages * sizeof (page_t *));
2774 2774          if (mfnlist)
2775 2775                  kmem_free(mfnlist, extpages * sizeof (mfn_t));
2776 2776          page_unresv(extpages - minctg);
2777 2777          return (NULL);
2778 2778  }
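
The extent sizing at the top of page_swap_with_hypervisor() rounds minctg up to a power of two because the hypervisor deals in 2^order extents; the surplus (extpages - minctg) is reserved up front and freed back after the exchange. A small sketch of that arithmetic alone, with a user-level stand-in for the kernel's highbit() (which returns the 1-based position of the highest set bit):

        #include <stdio.h>

        /* stand-in for the kernel's highbit() */
        static int
        highbit(unsigned long v)
        {
                int h = 0;

                while (v != 0) {
                        h++;
                        v >>= 1;
                }
                return (h);
        }

        int
        main(void)
        {
                unsigned long minctg;

                for (minctg = 1; minctg <= 9; minctg++) {
                        int order = highbit(minctg) - 1;

                        if (minctg & ((1UL << order) - 1))
                                order++;        /* not a power of two */
                        printf("minctg %lu -> order %d, extent of %lu pages\n",
                            minctg, order, 1UL << order);
                }
                return (0);
        }

A minctg of 5, for example, yields order 3 and an 8-page extent, so three extra pages are reserved and returned once the hypervisor exchange succeeds.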
2779 2779  
2780 2780  static void
2781 2781  return_partial_alloc(page_t *plist)
2782 2782  {
2783 2783          page_t *pp;
2784 2784  
2785 2785          while (plist != NULL) {
2786 2786                  pp = plist;
2787 2787                  page_sub(&plist, pp);
2788 2788                  page_io_unlock(pp);
2789 2789                  page_destroy_io(pp);
2790 2790          }
2791 2791  }
2792 2792  
2793 2793  static page_t *
2794 2794  page_get_contigpages(
2795 2795          struct vnode    *vp,
2796 2796          u_offset_t      off,
2797 2797          int             *npagesp,
2798 2798          uint_t          flags,
2799 2799          caddr_t         vaddr,
2800 2800          ddi_dma_attr_t  *mattr)
2801 2801  {
2802 2802          mfn_t   max_mfn = HYPERVISOR_memory_op(XENMEM_maximum_ram_page, NULL);
2803 2803          page_t  *plist; /* list to return */
2804 2804          page_t  *pp, *mcpl;
2805 2805          int     contig, anyaddr, npages, getone = 0;
2806 2806          mfn_t   lo_mfn;
2807 2807          mfn_t   hi_mfn;
2808 2808          pgcnt_t pfnalign = 0;
2809 2809          int     align, sgllen;
2810 2810          uint64_t pfnseg;
2811 2811          pgcnt_t minctg;
2812 2812  
2813 2813          npages = *npagesp;
2814 2814          ASSERT(mattr != NULL);
2815 2815          lo_mfn = mmu_btop(mattr->dma_attr_addr_lo);
2816 2816          hi_mfn = mmu_btop(mattr->dma_attr_addr_hi);
2817 2817          sgllen = mattr->dma_attr_sgllen;
2818 2818          pfnseg = mmu_btop(mattr->dma_attr_seg);
2819 2819          align = maxbit(mattr->dma_attr_align, mattr->dma_attr_minxfer);
2820 2820          if (align > MMU_PAGESIZE)
2821 2821                  pfnalign = mmu_btop(align);
2822 2822  
2823 2823          contig = flags & PG_PHYSCONTIG;
2824 2824          if (npages == -1) {
2825 2825                  npages = 1;
2826 2826                  pfnalign = 0;
2827 2827          }
2828 2828          /*
2829 2829           * Clear the contig flag if only one page is needed.
2830 2830           */
2831 2831          if (npages == 1) {
2832 2832                  getone = 1;
2833 2833                  contig = 0;
2834 2834          }
2835 2835  
2836 2836          /*
2837 2837           * Check if any page in the system is fine.
2838 2838           */
2839 2839          anyaddr = lo_mfn == 0 && hi_mfn >= max_mfn;
2840 2840          if (!contig && anyaddr && !pfnalign) {
2841 2841                  flags &= ~PG_PHYSCONTIG;
2842 2842                  plist = page_create_va(vp, off, npages * MMU_PAGESIZE,
2843 2843                      flags, &kvseg, vaddr);
2844 2844                  if (plist != NULL) {
2845 2845                          *npagesp = 0;
2846 2846                          return (plist);
2847 2847                  }
2848 2848          }
2849 2849          plist = NULL;
2850 2850          minctg = howmany(npages, sgllen);
2851 2851          while (npages > sgllen || getone) {
2852 2852                  if (minctg > npages)
2853 2853                          minctg = npages;
2854 2854                  mcpl = NULL;
2855 2855                  /*
2856 2856                   * We may want contig pages with no address range limits.
2857 2857                   */
2858 2858                  if (anyaddr && contig) {
2859 2859                          /*
2860 2860                           * Look for free contig pages to satisfy the request.
2861 2861                           */
2862 2862                          mcpl = find_contig_free(minctg, flags, pfnseg,
2863 2863                              pfnalign);
2864 2864                  }
2865 2865                  /*
2866 2866                   * Try the reserved io pools next
2867 2867                   */
2868 2868                  if (mcpl == NULL)
2869 2869                          mcpl = page_io_pool_alloc(mattr, contig, minctg);
2870 2870                  if (mcpl != NULL) {
2871 2871                          pp = mcpl;
2872 2872                          do {
2873 2873                                  if (!page_hashin(pp, vp, off, NULL)) {
2874 2874                                          panic("page_get_contigpages:"
2875 2875                                              " hashin failed"
2876 2876                                              " pp %p, vp %p, off %llx",
2877 2877                                              (void *)pp, (void *)vp, off);
2878 2878                                  }
2879 2879                                  off += MMU_PAGESIZE;
2880 2880                                  PP_CLRFREE(pp);
2881 2881                                  PP_CLRAGED(pp);
2882 2882                                  page_set_props(pp, P_REF);
2883 2883                                  page_io_lock(pp);
2884 2884                                  pp = pp->p_next;
2885 2885                          } while (pp != mcpl);
2886 2886                  } else {
2887 2887                          /*
2888 2888                           * Hypervisor exchange doesn't handle segment or
2889 2889                           * alignment constraints
2890 2890                           */
2891 2891                          if (mattr->dma_attr_seg < mattr->dma_attr_addr_hi ||
2892 2892                              pfnalign)
2893 2893                                  goto fail;
2894 2894                          /*
2895 2895                           * Try exchanging pages with the hypervisor
2896 2896                           */
2897 2897                          mcpl = page_swap_with_hypervisor(vp, off, vaddr, mattr,
2898 2898                              flags, minctg);
2899 2899                          if (mcpl == NULL)
2900 2900                                  goto fail;
2901 2901                          off += minctg * MMU_PAGESIZE;
2902 2902                  }
2903 2903                  check_dma(mattr, mcpl, minctg);
2904 2904                  /*
2905 2905                   * We now have a minctg run of contiguous pages; add them to the
2906 2906                   * list we will return for this request.
2907 2907                   */
2908 2908                  page_list_concat(&plist, &mcpl);
2909 2909                  npages -= minctg;
2910 2910                  *npagesp = npages;
2911 2911                  sgllen--;
2912 2912                  if (getone)
2913 2913                          break;
2914 2914          }
2915 2915          return (plist);
2916 2916  fail:
2917 2917          return_partial_alloc(plist);
2918 2918          return (NULL);
2919 2919  }
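
The scatter/gather handling above carves the request into runs of minctg = howmany(npages, sgllen) pages, one run per cookie, and drops out of the loop once the remainder (npages <= sgllen) can be collected a page at a time by page_create_io(). A simplified sketch of just the chunking arithmetic, with hypothetical sizes and the howmany() definition from sys/sysmacros.h (unlike the kernel loop, it prints every chunk including the final single pages):

        #include <stdio.h>

        #define howmany(x, y)   (((x) + ((y) - 1)) / (y))

        int
        main(void)
        {
                int npages = 10;        /* total pages wanted */
                int sgllen = 4;         /* cookies the device can take */
                int minctg = howmany(npages, sgllen);

                while (npages > 0) {
                        if (minctg > npages)
                                minctg = npages;
                        printf("contiguous run of %d page(s)\n", minctg);
                        npages -= minctg;
                        sgllen--;
                }
                return (0);
        }

For a 10-page request against a 4-cookie device this prints runs of 3, 3, 3 and 1 pages; the kernel loop itself would stop after the third run and leave the last page to the single-page path in page_create_io().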
2920 2920  
2921 2921  /*
2922 2922   * Allocator for domain 0 I/O pages. We match the required
2923 2923   * DMA attributes and contiguity constraints.
2924 2924   */
2925 2925  /*ARGSUSED*/
2926 2926  page_t *
2927 2927  page_create_io(
2928 2928          struct vnode    *vp,
2929 2929          u_offset_t      off,
2930 2930          uint_t          bytes,
2931 2931          uint_t          flags,
2932 2932          struct as       *as,
2933 2933          caddr_t         vaddr,
2934 2934          ddi_dma_attr_t  *mattr)
2935 2935  {
2936 2936          page_t  *plist = NULL, *pp;
2937 2937          int     npages = 0, contig, anyaddr, pages_req;
2938 2938          mfn_t   lo_mfn;
2939 2939          mfn_t   hi_mfn;
2940 2940          pgcnt_t pfnalign = 0;
2941 2941          int     align;
2942 2942          int     is_domu = 0;
2943 2943          int     dummy, bytes_got;
2944 2944          mfn_t   max_mfn = HYPERVISOR_memory_op(XENMEM_maximum_ram_page, NULL);
2945 2945  
2946 2946          ASSERT(mattr != NULL);
2947 2947          lo_mfn = mmu_btop(mattr->dma_attr_addr_lo);
2948 2948          hi_mfn = mmu_btop(mattr->dma_attr_addr_hi);
2949 2949          align = maxbit(mattr->dma_attr_align, mattr->dma_attr_minxfer);
2950 2950          if (align > MMU_PAGESIZE)
2951 2951                  pfnalign = mmu_btop(align);
2952 2952  
2953 2953          /*
2954 2954           * Clear the contig flag if only one page is needed or the scatter
2955 2955           * gather list length is >= npages.
2956 2956           */
2957 2957          pages_req = npages = mmu_btopr(bytes);
2958 2958          contig = (flags & PG_PHYSCONTIG);
2959 2959          bytes = P2ROUNDUP(bytes, MMU_PAGESIZE);
2960 2960          if (bytes == MMU_PAGESIZE || mattr->dma_attr_sgllen >= npages)
2961 2961                  contig = 0;
2962 2962  
2963 2963          /*
2964 2964           * Check if any old page in the system is fine.
2965 2965           * DomU should always go down this path.
2966 2966           */
2967 2967          is_domu = !DOMAIN_IS_INITDOMAIN(xen_info);
2968 2968          anyaddr = lo_mfn == 0 && hi_mfn >= max_mfn && !pfnalign;
2969 2969          if ((!contig && anyaddr) || is_domu) {
2970 2970                  flags &= ~PG_PHYSCONTIG;
2971 2971                  plist = page_create_va(vp, off, bytes, flags, &kvseg, vaddr);
2972 2972                  if (plist != NULL)
2973 2973                          return (plist);
2974 2974                  else if (is_domu)
2975 2975                          return (NULL); /* no memory available */
2976 2976          }
2977 2977          /*
2978 2978           * DomU should never reach here
2979 2979           */
2980 2980          if (contig) {
2981 2981                  plist = page_get_contigpages(vp, off, &npages, flags, vaddr,
2982 2982                      mattr);
2983 2983                  if (plist == NULL)
2984 2984                          goto fail;
2985 2985                  bytes_got = (pages_req - npages) << MMU_PAGESHIFT;
2986 2986                  vaddr += bytes_got;
2987 2987                  off += bytes_got;
2988 2988                  /*
2989 2989                   * We now have all the contiguous pages we need, but
2990 2990                   * we may still need additional non-contiguous pages.
2991 2991                   */
2992 2992          }
2993 2993          /*
2994 2994           * Now loop collecting the requested number of pages.  These do
2995 2995           * not have to be contiguous, but we use the contig page alloc
2996 2996           * code to get them since it honors any other constraints the
2997 2997           * pages may have.
2998 2998           */
2999 2999          while (npages--) {
3000 3000                  dummy = -1;
3001 3001                  pp = page_get_contigpages(vp, off, &dummy, flags, vaddr, mattr);
3002 3002                  if (pp == NULL)
3003 3003                          goto fail;
3004 3004                  page_add(&plist, pp);
3005 3005                  vaddr += MMU_PAGESIZE;
3006 3006                  off += MMU_PAGESIZE;
3007 3007          }
3008 3008          return (plist);
3009 3009  fail:
3010 3010          /*
3011 3011           * Failed to get enough pages; return the ones we did get.
3012 3012           */
3013 3013          return_partial_alloc(plist);
3014 3014          return (NULL);
3015 3015  }
3016 3016  
3017 3017  /*
3018 3018   * Lock and return the page with the highest mfn that we can find.  last_mfn
3019 3019   * holds the last one found, so the next search can start from there.  We
3020 3020   * also keep a counter so that we don't loop forever if the machine has no
3021 3021   * free pages.
3022 3022   *
3023 3023   * This is called from the balloon thread to find pages to give away.  new_high
3024 3024   * is used when new mfn's have been added to the system - we will reset our
3025 3025   * search if the new mfn's are higher than our current search position.
3026 3026   */
3027 3027  page_t *
3028 3028  page_get_high_mfn(mfn_t new_high)
3029 3029  {
3030 3030          static mfn_t last_mfn = 0;
3031 3031          pfn_t pfn;
3032 3032          page_t *pp;
3033 3033          ulong_t loop_count = 0;
3034 3034  
3035 3035          if (new_high > last_mfn)
3036 3036                  last_mfn = new_high;
3037 3037  
3038 3038          for (; loop_count < mfn_count; loop_count++, last_mfn--) {
3039 3039                  if (last_mfn == 0) {
3040 3040                          last_mfn = cached_max_mfn;
3041 3041                  }
3042 3042  
3043 3043                  pfn = mfn_to_pfn(last_mfn);
3044 3044                  if (pfn & PFN_IS_FOREIGN_MFN)
3045 3045                          continue;
3046 3046  
3047 3047                  /* See if the page is free.  If so, lock it. */
3048 3048                  pp = page_numtopp_alloc(pfn);
3049 3049                  if (pp == NULL)
3050 3050                          continue;
3051 3051                  PP_CLRFREE(pp);
3052 3052  
3053 3053                  ASSERT(PAGE_EXCL(pp));
3054 3054                  ASSERT(pp->p_vnode == NULL);
3055 3055                  ASSERT(!hat_page_is_mapped(pp));
3056 3056                  last_mfn--;
3057 3057                  return (pp);
3058 3058          }
3059 3059          return (NULL);
3060 3060  }
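
The scan above walks the MFN space downward from last_mfn, wraps back to the top when it hits zero, and bounds itself with a loop counter so it terminates even when nothing is free. A toy user-level sketch of that wrap-around walk, using a hypothetical free map in place of page_numtopp_alloc():

        #include <stdio.h>

        #define MAX_MFN         16

        /* hypothetical stand-in for "is this frame free and lockable" */
        static int is_free[MAX_MFN + 1] = { [3] = 1, [11] = 1 };

        int
        main(void)
        {
                static unsigned int last_mfn = 9;       /* resume point */
                unsigned int loop_count;

                for (loop_count = 0; loop_count <= MAX_MFN;
                    loop_count++, last_mfn--) {
                        if (last_mfn == 0)
                                last_mfn = MAX_MFN;     /* wrap to the top */
                        if (!is_free[last_mfn])
                                continue;
                        printf("grabbed mfn %u\n", last_mfn);
                        last_mfn--;             /* next call resumes below */
                        return (0);
                }
                printf("no free frame found\n");
                return (0);
        }

Starting at MFN 9 this walks 9, 8, ... and grabs MFN 3; a later call would resume at MFN 2, wrap at zero, and find MFN 11.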
3061 3061  
3062 3062  #else /* !__xpv */
3063 3063  
3064 3064  /*
3065 3065   * get a page from any list with the given mnode
3066 3066   */
3067 3067  static page_t *
3068 3068  page_get_mnode_anylist(ulong_t origbin, uchar_t szc, uint_t flags,
3069 3069      int mnode, int mtype, ddi_dma_attr_t *dma_attr)
3070 3070  {
3071 3071          kmutex_t                *pcm;
3072 3072          int                     i;
3073 3073          page_t                  *pp;
3074 3074          page_t                  *first_pp;
3075 3075          uint64_t                pgaddr;
3076 3076          ulong_t                 bin;
3077 3077          int                     mtypestart;
3078 3078          int                     plw_initialized;
3079 3079          page_list_walker_t      plw;
3080 3080  
3081 3081          VM_STAT_ADD(pga_vmstats.pgma_alloc);
3082 3082  
3083 3083          ASSERT((flags & PG_MATCH_COLOR) == 0);
3084 3084          ASSERT(szc == 0);
3085 3085          ASSERT(dma_attr != NULL);
3086 3086  
3087 3087          MTYPE_START(mnode, mtype, flags);
3088 3088          if (mtype < 0) {
3089 3089                  VM_STAT_ADD(pga_vmstats.pgma_allocempty);
3090 3090                  return (NULL);
3091 3091          }
3092 3092  
3093 3093          mtypestart = mtype;
3094 3094  
3095 3095          bin = origbin;
3096 3096  
3097 3097          /*
3098 3098           * check up to page_colors + 1 bins - origbin may be checked twice
3099 3099           * because of BIN_STEP skip
3100 3100           */
3101 3101          do {
3102 3102                  plw_initialized = 0;
3103 3103  
3104 3104                  for (plw.plw_count = 0;
3105 3105                      plw.plw_count < page_colors; plw.plw_count++) {
3106 3106  
3107 3107                          if (PAGE_FREELISTS(mnode, szc, bin, mtype) == NULL)
3108 3108                                  goto nextfreebin;
3109 3109  
3110 3110                          pcm = PC_BIN_MUTEX(mnode, bin, PG_FREE_LIST);
3111 3111                          mutex_enter(pcm);
3112 3112                          pp = PAGE_FREELISTS(mnode, szc, bin, mtype);
3113 3113                          first_pp = pp;
3114 3114                          while (pp != NULL) {
3115 3115                                  if (IS_DUMP_PAGE(pp) || page_trylock(pp,
3116 3116                                      SE_EXCL) == 0) {
3117 3117                                          pp = pp->p_next;
3118 3118                                          if (pp == first_pp) {
3119 3119                                                  pp = NULL;
3120 3120                                          }
3121 3121                                          continue;
3122 3122                                  }
3123 3123  
3124 3124                                  ASSERT(PP_ISFREE(pp));
3125 3125                                  ASSERT(PP_ISAGED(pp));
3126 3126                                  ASSERT(pp->p_vnode == NULL);
3127 3127                                  ASSERT(pp->p_hash == NULL);
3128 3128                                  ASSERT(pp->p_offset == (u_offset_t)-1);
3129 3129                                  ASSERT(pp->p_szc == szc);
3130 3130                                  ASSERT(PFN_2_MEM_NODE(pp->p_pagenum) == mnode);
3131 3131                                  /* check if page within DMA attributes */
3132 3132                                  pgaddr = pa_to_ma(pfn_to_pa(pp->p_pagenum));
3133 3133                                  if ((pgaddr >= dma_attr->dma_attr_addr_lo) &&
3134 3134                                      (pgaddr + MMU_PAGESIZE - 1 <=
3135 3135                                      dma_attr->dma_attr_addr_hi)) {
3136 3136                                          break;
3137 3137                                  }
3138 3138  
3139 3139                                  /* continue looking */
3140 3140                                  page_unlock(pp);
3141 3141                                  pp = pp->p_next;
3142 3142                                  if (pp == first_pp)
3143 3143                                          pp = NULL;
3144 3144  
3145 3145                          }
3146 3146                          if (pp != NULL) {
3147 3147                                  ASSERT(mtype == PP_2_MTYPE(pp));
3148 3148                                  ASSERT(pp->p_szc == 0);
3149 3149  
3150 3150                                  /* found a page with specified DMA attributes */
3151 3151                                  page_sub(&PAGE_FREELISTS(mnode, szc, bin,
3152 3152                                      mtype), pp);
3153 3153                                  page_ctr_sub(mnode, mtype, pp, PG_FREE_LIST);
3154 3154  
3155 3155                                  if ((PP_ISFREE(pp) == 0) ||
3156 3156                                      (PP_ISAGED(pp) == 0)) {
3157 3157                                          cmn_err(CE_PANIC, "page %p is not free",
3158 3158                                              (void *)pp);
3159 3159                                  }
3160 3160  
3161 3161                                  mutex_exit(pcm);
3162 3162                                  check_dma(dma_attr, pp, 1);
3163 3163                                  VM_STAT_ADD(pga_vmstats.pgma_allocok);
3164 3164                                  return (pp);
3165 3165                          }
3166 3166                          mutex_exit(pcm);
3167 3167  nextfreebin:
3168 3168                          if (plw_initialized == 0) {
3169 3169                                  page_list_walk_init(szc, 0, bin, 1, 0, &plw);
3170 3170                                  ASSERT(plw.plw_ceq_dif == page_colors);
3171 3171                                  plw_initialized = 1;
3172 3172                          }
3173 3173  
3174 3174                          if (plw.plw_do_split) {
3175 3175                                  pp = page_freelist_split(szc, bin, mnode,
3176 3176                                      mtype,
3177 3177                                      mmu_btop(dma_attr->dma_attr_addr_lo),
3178 3178                                      mmu_btop(dma_attr->dma_attr_addr_hi + 1),
3179 3179                                      &plw);
3180 3180                                  if (pp != NULL) {
3181 3181                                          check_dma(dma_attr, pp, 1);
3182 3182                                          return (pp);
3183 3183                                  }
3184 3184                          }
3185 3185  
3186 3186                          bin = page_list_walk_next_bin(szc, bin, &plw);
3187 3187                  }
3188 3188  
3189 3189                  MTYPE_NEXT(mnode, mtype, flags);
3190 3190          } while (mtype >= 0);
3191 3191  
3192 3192          /* failed to find a page in the freelist; try it in the cachelist */
3193 3193  
3194 3194          /* reset mtype start for cachelist search */
3195 3195          mtype = mtypestart;
3196 3196          ASSERT(mtype >= 0);
3197 3197  
3198 3198          /* start with the bin of matching color */
3199 3199          bin = origbin;
3200 3200  
3201 3201          do {
3202 3202                  for (i = 0; i <= page_colors; i++) {
3203 3203                          if (PAGE_CACHELISTS(mnode, bin, mtype) == NULL)
3204 3204                                  goto nextcachebin;
3205 3205                          pcm = PC_BIN_MUTEX(mnode, bin, PG_CACHE_LIST);
3206 3206                          mutex_enter(pcm);
3207 3207                          pp = PAGE_CACHELISTS(mnode, bin, mtype);
3208 3208                          first_pp = pp;
3209 3209                          while (pp != NULL) {
3210 3210                                  if (IS_DUMP_PAGE(pp) || page_trylock(pp,
3211 3211                                      SE_EXCL) == 0) {
3212 3212                                          pp = pp->p_next;
3213 3213                                          if (pp == first_pp)
3214 3214                                                  pp = NULL;
3215 3215                                          continue;
3216 3216                                  }
3217 3217                                  ASSERT(pp->p_vnode);
3218 3218                                  ASSERT(PP_ISAGED(pp) == 0);
3219 3219                                  ASSERT(pp->p_szc == 0);
3220 3220                                  ASSERT(PFN_2_MEM_NODE(pp->p_pagenum) == mnode);
3221 3221  
3222 3222                                  /* check if page within DMA attributes */
3223 3223  
3224 3224                                  pgaddr = pa_to_ma(pfn_to_pa(pp->p_pagenum));
3225 3225                                  if ((pgaddr >= dma_attr->dma_attr_addr_lo) &&
3226 3226                                      (pgaddr + MMU_PAGESIZE - 1 <=
3227 3227                                      dma_attr->dma_attr_addr_hi)) {
3228 3228                                          break;
3229 3229                                  }
3230 3230  
3231 3231                                  /* continue looking */
3232 3232                                  page_unlock(pp);
3233 3233                                  pp = pp->p_next;
3234 3234                                  if (pp == first_pp)
3235 3235                                          pp = NULL;
3236 3236                          }
3237 3237  
3238 3238                          if (pp != NULL) {
3239 3239                                  ASSERT(mtype == PP_2_MTYPE(pp));
3240 3240                                  ASSERT(pp->p_szc == 0);
3241 3241  
3242 3242                                  /* found a page with specified DMA attributes */
3243 3243                                  page_sub(&PAGE_CACHELISTS(mnode, bin,
3244 3244                                      mtype), pp);
3245 3245                                  page_ctr_sub(mnode, mtype, pp, PG_CACHE_LIST);
3246 3246  
3247 3247                                  mutex_exit(pcm);
3248 3248                                  ASSERT(pp->p_vnode);
3249 3249                                  ASSERT(PP_ISAGED(pp) == 0);
3250 3250                                  check_dma(dma_attr, pp, 1);
3251 3251                                  VM_STAT_ADD(pga_vmstats.pgma_allocok);
3252 3252                                  return (pp);
3253 3253                          }
3254 3254                          mutex_exit(pcm);
3255 3255  nextcachebin:
3256 3256                          bin += (i == 0) ? BIN_STEP : 1;
3257 3257                          bin &= page_colors_mask;
3258 3258                  }
3259 3259                  MTYPE_NEXT(mnode, mtype, flags);
3260 3260          } while (mtype >= 0);
3261 3261  
3262 3262          VM_STAT_ADD(pga_vmstats.pgma_allocfailed);
3263 3263          return (NULL);
3264 3264  }
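
Both scans above apply the same containment test: a candidate page is usable only if every byte of it lies inside [dma_attr_addr_lo, dma_attr_addr_hi]. A minimal sketch with hypothetical addresses:

        #include <stdio.h>
        #include <stdint.h>

        #define MMU_PAGESIZE    4096

        int
        main(void)
        {
                uint64_t addr_lo = 0x0;
                uint64_t addr_hi = 0xFFFFFFFF;  /* 32-bit DMA limit */
                uint64_t pgaddr = 0xFFFFF000;   /* last page below 4GB */

                if (pgaddr >= addr_lo &&
                    pgaddr + MMU_PAGESIZE - 1 <= addr_hi)
                        printf("page at %#llx is usable\n",
                            (unsigned long long)pgaddr);
                else
                        printf("page at %#llx is out of range\n",
                            (unsigned long long)pgaddr);
                return (0);
        }

The last 4KB page below 4GB passes because its final byte, pgaddr + MMU_PAGESIZE - 1, is exactly the 32-bit limit; the next page up, at 0x100000000, would not.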
3265 3265  
3266 3266  /*
3267 3267   * This function is similar to page_get_freelist()/page_get_cachelist()
3268 3268   * but it searches both lists to find a page with the specified
3269 3269   * color (or no color) and DMA attributes. The search is done in the
3270 3270   * freelist first and then in the cache list within the highest memory
3271 3271   * range (based on DMA attributes) before searching in the lower
3272 3272   * memory ranges.
3273 3273   *
3274 3274   * Note: This function is called only by page_create_io().
3275 3275   */
3276 3276  /*ARGSUSED*/
3277 3277  static page_t *
3278 3278  page_get_anylist(struct vnode *vp, u_offset_t off, struct as *as, caddr_t vaddr,
3279 3279      size_t size, uint_t flags, ddi_dma_attr_t *dma_attr, lgrp_t *lgrp)
3280 3280  {
3281 3281          uint_t          bin;
3282 3282          int             mtype;
3283 3283          page_t          *pp;
3284 3284          int             n;
3285 3285          int             m;
3286 3286          int             szc;
3287 3287          int             fullrange;
3288 3288          int             mnode;
3289 3289          int             local_failed_stat = 0;
3290 3290          lgrp_mnode_cookie_t     lgrp_cookie;
3291 3291  
3292 3292          VM_STAT_ADD(pga_vmstats.pga_alloc);
3293 3293  
3294 3294          /* only base pagesize currently supported */
3295 3295          if (size != MMU_PAGESIZE)
3296 3296                  return (NULL);
3297 3297  
3298 3298          /*
3299 3299           * If we're passed a specific lgroup, we use it.  Otherwise,
3300 3300           * assume first-touch placement is desired.
3301 3301           */
3302 3302          if (!LGRP_EXISTS(lgrp))
3303 3303                  lgrp = lgrp_home_lgrp();
3304 3304  
3305 3305          /* LINTED */
3306 3306          AS_2_BIN(as, seg, vp, vaddr, bin, 0);
3307 3307  
3308 3308          /*
3309 3309           * Only hold one freelist or cachelist lock at a time; that way we
3310 3310           * can start anywhere and not have to worry about lock
3311 3311           * ordering.
3312 3312           */
3313 3313          if (dma_attr == NULL) {
3314 3314                  n = mtype16m;
3315 3315                  m = mtypetop;
3316 3316                  fullrange = 1;
3317 3317                  VM_STAT_ADD(pga_vmstats.pga_nulldmaattr);
3318 3318          } else {
3319 3319                  pfn_t pfnlo = mmu_btop(dma_attr->dma_attr_addr_lo);
3320 3320                  pfn_t pfnhi = mmu_btop(dma_attr->dma_attr_addr_hi);
3321 3321  
3322 3322                  /*
3323 3323                   * We can only guarantee alignment to a page boundary.
3324 3324                   */
3325 3325                  if (dma_attr->dma_attr_align > MMU_PAGESIZE)
3326 3326                          return (NULL);
3327 3327  
3328 3328                  /* Sanity check the dma_attr */
3329 3329                  if (pfnlo > pfnhi)
3330 3330                          return (NULL);
3331 3331  
3332 3332                  n = pfn_2_mtype(pfnlo);
3333 3333                  m = pfn_2_mtype(pfnhi);
3334 3334  
3335 3335                  fullrange = ((pfnlo == mnoderanges[n].mnr_pfnlo) &&
3336 3336                      (pfnhi >= mnoderanges[m].mnr_pfnhi));
3337 3337          }
3338 3338          VM_STAT_COND_ADD(fullrange == 0, pga_vmstats.pga_notfullrange);
3339 3339  
3340 3340          szc = 0;
3341 3341  
3342 3342          /* cycling through mtype is handled by RANGE0 if n == mtype16m */
3343 3343          if (n == mtype16m) {
3344 3344                  flags |= PGI_MT_RANGE0;
3345 3345                  n = m;
3346 3346          }
3347 3347  
3348 3348          /*
3349 3349           * Try local memory node first, but try remote if we can't
3350 3350           * get a page of the right color.
3351 3351           */
3352 3352          LGRP_MNODE_COOKIE_INIT(lgrp_cookie, lgrp, LGRP_SRCH_HIER);
3353 3353          while ((mnode = lgrp_memnode_choose(&lgrp_cookie)) >= 0) {
3354 3354                  /*
3355 3355                   * allocate pages from high pfn to low.
3356 3356                   */
3357 3357                  mtype = m;
3358 3358                  do {
3359 3359                          if (fullrange != 0) {
3360 3360                                  pp = page_get_mnode_freelist(mnode,
3361 3361                                      bin, mtype, szc, flags);
3362 3362                                  if (pp == NULL) {
3363 3363                                          pp = page_get_mnode_cachelist(
3364 3364                                              bin, flags, mnode, mtype);
3365 3365                                  }
3366 3366                          } else {
3367 3367                                  pp = page_get_mnode_anylist(bin, szc,
3368 3368                                      flags, mnode, mtype, dma_attr);
3369 3369                          }
3370 3370                          if (pp != NULL) {
3371 3371                                  VM_STAT_ADD(pga_vmstats.pga_allocok);
3372 3372                                  check_dma(dma_attr, pp, 1);
3373 3373                                  return (pp);
3374 3374                          }
3375 3375                  } while (mtype != n &&
3376 3376                      (mtype = mnoderanges[mtype].mnr_next) != -1);
3377 3377                  if (!local_failed_stat) {
3378 3378                          lgrp_stat_add(lgrp->lgrp_id, LGRP_NUM_ALLOC_FAIL, 1);
3379 3379                          local_failed_stat = 1;
3380 3380                  }
3381 3381          }
3382 3382          VM_STAT_ADD(pga_vmstats.pga_allocfailed);
3383 3383  
3384 3384          return (NULL);
3385 3385  }
3386 3386  
3387 3387  /*
3388 3388   * page_create_io()
3389 3389   *
3390 3390   * This function is a copy of page_create_va() with an additional
3391 3391   * argument 'mattr' that specifies DMA memory requirements to
3392 3392   * the page list functions. This function is used by the segkmem
3393 3393   * allocator, so it is only used to create new pages (i.e., PG_EXCL
3394 3394   * is set).
3395 3395   *
3396 3396   * Note: This interface is currently used by the x86 PSM only and is
3397 3397   *       not fully specified, so the commitment level is only that of
3398 3398   *       a private interface specific to x86. This interface uses the
3399 3399   *       PSM-specific page_get_anylist() interface.
3400 3400   */
3401 3401  
3402 3402  #define PAGE_HASH_SEARCH(index, pp, vp, off) { \
3403 3403          for ((pp) = page_hash[(index)]; (pp); (pp) = (pp)->p_hash) { \
3404 3404                  if ((pp)->p_vnode == (vp) && (pp)->p_offset == (off)) \
3405 3405                          break; \
3406 3406          } \
3407 3407  }
3408 3408  
3409 3409  
3410 3410  page_t *
3411 3411  page_create_io(
3412 3412          struct vnode    *vp,
3413 3413          u_offset_t      off,
3414 3414          uint_t          bytes,
3415 3415          uint_t          flags,
3416 3416          struct as       *as,
3417 3417          caddr_t         vaddr,
3418 3418          ddi_dma_attr_t  *mattr) /* DMA memory attributes if any */
3419 3419  {
3420 3420          page_t          *plist = NULL;
3421 3421          uint_t          plist_len = 0;
3422 3422          pgcnt_t         npages;
3423 3423          page_t          *npp = NULL;
3424 3424          uint_t          pages_req;
3425 3425          page_t          *pp;
3426 3426          kmutex_t        *phm = NULL;
3427 3427          uint_t          index;
3428 3428  
3429 3429          TRACE_4(TR_FAC_VM, TR_PAGE_CREATE_START,
3430 3430              "page_create_start:vp %p off %llx bytes %u flags %x",
3431 3431              vp, off, bytes, flags);
3432 3432  
3433 3433          ASSERT((flags & ~(PG_EXCL | PG_WAIT | PG_PHYSCONTIG)) == 0);
3434 3434  
3435 3435          pages_req = npages = mmu_btopr(bytes);
3436 3436  
3437 3437          /*
3438 3438           * Do the freemem and pcf accounting.
3439 3439           */
3440 3440          if (!page_create_wait(npages, flags)) {
3441 3441                  return (NULL);
3442 3442          }
3443 3443  
3444 3444          TRACE_2(TR_FAC_VM, TR_PAGE_CREATE_SUCCESS,
3445 3445              "page_create_success:vp %p off %llx", vp, off);
3446 3446  
3447 3447          /*
3448 3448           * If satisfying this request has left us with too little
3449 3449           * memory, start the wheels turning to get some back.  The
3450 3450           * first clause of the test prevents waking up the pageout
3451 3451           * daemon in situations where it would decide that there's
3452 3452           * nothing to do.
3453 3453           */
3454 3454          if (nscan < desscan && freemem < minfree) {
3455 3455                  TRACE_1(TR_FAC_VM, TR_PAGEOUT_CV_SIGNAL,
3456 3456                      "pageout_cv_signal:freemem %ld", freemem);
3457 3457                  cv_signal(&proc_pageout->p_cv);
3458 3458          }
3459 3459  
3460 3460          if (flags & PG_PHYSCONTIG) {
3461 3461  
3462 3462                  plist = page_get_contigpage(&npages, mattr, 1);
3463 3463                  if (plist == NULL) {
3464 3464                          page_create_putback(npages);
3465 3465                          return (NULL);
3466 3466                  }
3467 3467  
3468 3468                  pp = plist;
3469 3469  
3470 3470                  do {
3471 3471                          if (!page_hashin(pp, vp, off, NULL)) {
3472 3472                                  panic("pg_creat_io: hashin failed %p %p %llx",
3473 3473                                      (void *)pp, (void *)vp, off);
3474 3474                          }
3475 3475                          VM_STAT_ADD(page_create_new);
3476 3476                          off += MMU_PAGESIZE;
3477 3477                          PP_CLRFREE(pp);
3478 3478                          PP_CLRAGED(pp);
3479 3479                          page_set_props(pp, P_REF);
3480 3480                          pp = pp->p_next;
3481 3481                  } while (pp != plist);
3482 3482  
3483 3483                  if (!npages) {
3484 3484                          check_dma(mattr, plist, pages_req);
3485 3485                          return (plist);
3486 3486                  } else {
3487 3487                          vaddr += (pages_req - npages) << MMU_PAGESHIFT;
3488 3488                  }
3489 3489  
3490 3490                  /*
3491 3491                   * fall-thru:
3492 3492                   *
3493 3493                   * page_get_contigpage returns when npages <= sgllen.
3494 3494                   * Grab the rest of the non-contig pages below from anylist.
3495 3495                   */
3496 3496          }
3497 3497  
3498 3498          /*
3499 3499           * Loop around collecting the requested number of pages.
3500 3500           * Most of the time, we have to `create' a new page. With
3501 3501           * this in mind, pull the page off the free list before
3502 3502           * getting the hash lock.  This will minimize the hash
3503 3503           * lock hold time, nesting, and the like.  If it turns
3504 3504           * out we don't need the page, we put it back at the end.
3505 3505           */
3506 3506          while (npages--) {
3507 3507                  phm = NULL;
3508 3508  
3509 3509                  index = PAGE_HASH_FUNC(vp, off);
3510 3510  top:
3511 3511                  ASSERT(phm == NULL);
3512 3512                  ASSERT(index == PAGE_HASH_FUNC(vp, off));
3513 3513                  ASSERT(MUTEX_NOT_HELD(page_vnode_mutex(vp)));
3514 3514  
3515 3515                  if (npp == NULL) {
3516 3516                          /*
3517 3517                           * Try to get the page of any color either from
3518 3518                           * the freelist or from the cache list.
3519 3519                           */
3520 3520                          npp = page_get_anylist(vp, off, as, vaddr, MMU_PAGESIZE,
3521 3521                              flags & ~PG_MATCH_COLOR, mattr, NULL);
3522 3522                          if (npp == NULL) {
3523 3523                                  if (mattr == NULL) {
3524 3524                                          /*
3525 3525                                           * Not looking for a special page;
3526 3526                                           * panic!
3527 3527                                           */
3528 3528                                          panic("no page found %d", (int)npages);
3529 3529                                  }
3530 3530                                  /*
3531 3531                                   * No page found! This can happen
3532 3532                                   * if we are looking for a page
3533 3533                                   * within a specific memory range
3534 3534                                   * for DMA purposes. If PG_WAIT is
3535 3535                                   * specified then we wait for a
3536 3536                                   * while and then try again. The
3537 3537                                   * wait could be forever if we
3538 3538                                   * don't get the page(s) we need.
3539 3539                                   *
3540 3540                                   * Note: XXX We really need a mechanism
3541 3541                                   * to wait for pages in the desired
3542 3542                                   * range. For now, we wait for any
3543 3543                                   * pages and see if we can use it.
3544 3544                                   */
3545 3545  
3546 3546                                  if ((mattr != NULL) && (flags & PG_WAIT)) {
3547 3547                                          delay(10);
3548 3548                                          goto top;
3549 3549                                  }
3550 3550                                  goto fail; /* undo accounting stuff */
3551 3551                          }
3552 3552  
3553 3553                          if (PP_ISAGED(npp) == 0) {
3554 3554                                  /*
3555 3555                                   * Since this page came from the
3556 3556                                   * cachelist, we must destroy the
3557 3557                                   * old vnode association.
3558 3558                                   */
3559 3559                                  page_hashout(npp, (kmutex_t *)NULL);
3560 3560                          }
3561 3561                  }
3562 3562  
3563 3563                  /*
3564 3564                   * We own this page!
3565 3565                   */
3566 3566                  ASSERT(PAGE_EXCL(npp));
3567 3567                  ASSERT(npp->p_vnode == NULL);
3568 3568                  ASSERT(!hat_page_is_mapped(npp));
3569 3569                  PP_CLRFREE(npp);
3570 3570                  PP_CLRAGED(npp);
3571 3571  
3572 3572                  /*
3573 3573                   * Here we have a page in our hot little mits and are
3574 3574                   * Here we have a page in our hot little mitts and are
3575 3575                   * Get the mutex and check to see if it really does
3576 3576                   * not exist.
3577 3577                   */
3578 3578                  phm = PAGE_HASH_MUTEX(index);
3579 3579                  mutex_enter(phm);
3580 3580                  PAGE_HASH_SEARCH(index, pp, vp, off);
3581 3581                  if (pp == NULL) {
3582 3582                          VM_STAT_ADD(page_create_new);
3583 3583                          pp = npp;
3584 3584                          npp = NULL;
3585 3585                          if (!page_hashin(pp, vp, off, phm)) {
3586 3586                                  /*
3587 3587                                   * Since we hold the page hash mutex and
3588 3588                                   * just searched for this page, page_hashin
3589 3589                                   * had better not fail.  If it does, that
3590 3590                   * means some thread did not follow the
3591 3591                                   * page hash mutex rules.  Panic now and
3592 3592                                   * get it over with.  As usual, go down
3593 3593                                   * holding all the locks.
3594 3594                                   */
3595 3595                                  ASSERT(MUTEX_HELD(phm));
3596 3596                                  panic("page_create: hashin fail %p %p %llx %p",
3597 3597                                      (void *)pp, (void *)vp, off, (void *)phm);
3598 3598  
3599 3599                          }
3600 3600                          ASSERT(MUTEX_HELD(phm));
3601 3601                          mutex_exit(phm);
3602 3602                          phm = NULL;
3603 3603  
3604 3604                          /*
3605 3605                           * Hat layer locking need not be done to set
3606 3606                           * the following bits since the page is not hashed
3607 3607                           * and was on the free list (i.e., had no mappings).
3608 3608                           *
3609 3609                           * Set the reference bit to protect
3610 3610                           * against immediate pageout
3611 3611                           *
3612 3612                           * XXXmh modify freelist code to set reference
3613 3613                           * bit so we don't have to do it here.
3614 3614                           */
3615 3615                          page_set_props(pp, P_REF);
3616 3616                  } else {
3617 3617                          ASSERT(MUTEX_HELD(phm));
3618 3618                          mutex_exit(phm);
3619 3619                          phm = NULL;
3620 3620                          /*
3621 3621                           * NOTE: This should not happen for pages associated
3622 3622                           *       with kernel vnode 'kvp'.
3623 3623                           */
3624 3624                          /* XX64 - to debug why this happens! */
3625 3625                          ASSERT(!VN_ISKAS(vp));
3626 3626                          if (VN_ISKAS(vp))
3627 3627                                  cmn_err(CE_NOTE,
3628 3628                                      "page_create: page not expected "
3629 3629                                      "in hash list for kernel vnode - pp 0x%p",
3630 3630                                      (void *)pp);
3631 3631                          VM_STAT_ADD(page_create_exists);
3632 3632                          goto fail;
3633 3633                  }
3634 3634  
3635 3635                  /*
3636 3636                   * Got a page!  It is locked.  Acquire the i/o
3637 3637                   * lock since we are going to use the p_next and
3638 3638                   * p_prev fields to link the requested pages together.
3639 3639                   */
3640 3640                  page_io_lock(pp);
3641 3641                  page_add(&plist, pp);
3642 3642                  plist = plist->p_next;
3643 3643                  off += MMU_PAGESIZE;
3644 3644                  vaddr += MMU_PAGESIZE;
3645 3645          }
3646 3646  
3647 3647          check_dma(mattr, plist, pages_req);
3648 3648          return (plist);
3649 3649  
3650 3650  fail:
3651 3651          if (npp != NULL) {
3652 3652                  /*
3653 3653                   * Did not need this page after all.
3654 3654                   * Put it back on the free list.
3655 3655                   */
3656 3656                  VM_STAT_ADD(page_create_putbacks);
3657 3657                  PP_SETFREE(npp);
3658 3658                  PP_SETAGED(npp);
3659 3659                  npp->p_offset = (u_offset_t)-1;
3660 3660                  page_list_add(npp, PG_FREE_LIST | PG_LIST_TAIL);
3661 3661                  page_unlock(npp);
3662 3662          }
3663 3663  
3664 3664          /*
3665 3665           * Give up the pages we already got.
3666 3666           */
3667 3667          while (plist != NULL) {
3668 3668                  pp = plist;
3669 3669                  page_sub(&plist, pp);
3670 3670                  page_io_unlock(pp);
3671 3671                  plist_len++;
3672 3672                  /*LINTED: constant in conditional ctx*/
3673 3673                  VN_DISPOSE(pp, B_INVAL, 0, kcred);
3674 3674          }
3675 3675  
3676 3676          /*
3677 3677           * VN_DISPOSE does freemem accounting for the pages in plist
3678 3678           * by calling page_free. So, we need to undo the pcf accounting
3679 3679           * for only the remaining pages.
3680 3680           */
3681 3681          VM_STAT_ADD(page_create_putbacks);
3682 3682          page_create_putback(pages_req - plist_len);
3683 3683  
3684 3684          return (NULL);
3685 3685  }
3686 3686  #endif /* !__xpv */
3687 3687  
3688 3688  
3689 3689  /*
3690 3690   * Copy the data from the physical page represented by "frompp" to
3691 3691   * that represented by "topp". ppcopy uses CPU->cpu_caddr1 and
3692 3692   * CPU->cpu_caddr2.  It assumes that no one uses either map at interrupt
3693 3693   * level and no one sleeps with an active mapping there.
3694 3694   *
3695 3695   * Note that the ref/mod bits in the page_t's are not affected by
3696 3696   * this operation, hence it is up to the caller to update them appropriately.
3697 3697   */
3698 3698  int
3699 3699  ppcopy(page_t *frompp, page_t *topp)
3700 3700  {
3701 3701          caddr_t         pp_addr1;
3702 3702          caddr_t         pp_addr2;
3703 3703          hat_mempte_t    pte1;
3704 3704          hat_mempte_t    pte2;
3705 3705          kmutex_t        *ppaddr_mutex;
3706 3706          label_t         ljb;
3707 3707          int             ret = 1;
3708 3708  
3709 3709          ASSERT_STACK_ALIGNED();
3710 3710          ASSERT(PAGE_LOCKED(frompp));
3711 3711          ASSERT(PAGE_LOCKED(topp));
3712 3712  
3713 3713          if (kpm_enable) {
3714 3714                  pp_addr1 = hat_kpm_page2va(frompp, 0);
3715 3715                  pp_addr2 = hat_kpm_page2va(topp, 0);
3716 3716                  kpreempt_disable();
3717 3717          } else {
3718 3718                  /*
3719 3719                   * disable preemption so that the CPU we are on can't change
3720 3720                   */
3721 3721                  kpreempt_disable();
3722 3722  
3723 3723                  pp_addr1 = CPU->cpu_caddr1;
3724 3724                  pp_addr2 = CPU->cpu_caddr2;
3725 3725                  pte1 = CPU->cpu_caddr1pte;
3726 3726                  pte2 = CPU->cpu_caddr2pte;
3727 3727  
3728 3728                  ppaddr_mutex = &CPU->cpu_ppaddr_mutex;
3729 3729                  mutex_enter(ppaddr_mutex);
3730 3730  
3731 3731                  hat_mempte_remap(page_pptonum(frompp), pp_addr1, pte1,
3732 3732                      PROT_READ | HAT_STORECACHING_OK, HAT_LOAD_NOCONSIST);
3733 3733                  hat_mempte_remap(page_pptonum(topp), pp_addr2, pte2,
3734 3734                      PROT_READ | PROT_WRITE | HAT_STORECACHING_OK,
3735 3735                      HAT_LOAD_NOCONSIST);
3736 3736          }
3737 3737  
3738 3738          if (on_fault(&ljb)) {
3739 3739                  ret = 0;
3740 3740                  goto faulted;
3741 3741          }
3742 3742          if (use_sse_pagecopy)
3743 3743  #ifdef __xpv
3744 3744                  page_copy_no_xmm(pp_addr2, pp_addr1);
3745 3745  #else
3746 3746                  hwblkpagecopy(pp_addr1, pp_addr2);
3747 3747  #endif
3748 3748          else
3749 3749                  bcopy(pp_addr1, pp_addr2, PAGESIZE);
3750 3750  
3751 3751          no_fault();
3752 3752  faulted:
3753 3753          if (!kpm_enable) {
3754 3754  #ifdef __xpv
3755 3755                  /*
3756 3756                   * We can't leave unused mappings lying about under the
3757 3757                   * hypervisor, so blow them away.
3758 3758                   */
3759 3759                  if (HYPERVISOR_update_va_mapping((uintptr_t)pp_addr1, 0,
3760 3760                      UVMF_INVLPG | UVMF_LOCAL) < 0)
3761 3761                          panic("HYPERVISOR_update_va_mapping() failed");
3762 3762                  if (HYPERVISOR_update_va_mapping((uintptr_t)pp_addr2, 0,
3763 3763                      UVMF_INVLPG | UVMF_LOCAL) < 0)
3764 3764                          panic("HYPERVISOR_update_va_mapping() failed");
3765 3765  #endif
3766 3766                  mutex_exit(ppaddr_mutex);
3767 3767          }
3768 3768          kpreempt_enable();
3769 3769          return (ret);
3770 3770  }
3771 3771  
3772 3772  void
3773 3773  pagezero(page_t *pp, uint_t off, uint_t len)
3774 3774  {
3775 3775          ASSERT(PAGE_LOCKED(pp));
3776 3776          pfnzero(page_pptonum(pp), off, len);
3777 3777  }
3778 3778  
3779 3779  /*
3780 3780   * Zero the physical page from off to off + len given by pfn
3781 3781   * without changing the reference and modified bits of page.
3782 3782   *
3783 3783   * We do this using CPU private page address #2; see ppcopy() for more info.
3784 3784   * pfnzero() must not be called at interrupt level.
3785 3785   */
3786 3786  void
3787 3787  pfnzero(pfn_t pfn, uint_t off, uint_t len)
3788 3788  {
3789 3789          caddr_t         pp_addr2;
3790 3790          hat_mempte_t    pte2;
3791 3791          kmutex_t        *ppaddr_mutex = NULL;
3792 3792  
3793 3793          ASSERT_STACK_ALIGNED();
3794 3794          ASSERT(len <= MMU_PAGESIZE);
3795 3795          ASSERT(off <= MMU_PAGESIZE);
3796 3796          ASSERT(off + len <= MMU_PAGESIZE);
3797 3797  
3798 3798          if (kpm_enable && !pfn_is_foreign(pfn)) {
3799 3799                  pp_addr2 = hat_kpm_pfn2va(pfn);
3800 3800                  kpreempt_disable();
3801 3801          } else {
3802 3802                  kpreempt_disable();
3803 3803  
3804 3804                  pp_addr2 = CPU->cpu_caddr2;
3805 3805                  pte2 = CPU->cpu_caddr2pte;
3806 3806  
3807 3807                  ppaddr_mutex = &CPU->cpu_ppaddr_mutex;
3808 3808                  mutex_enter(ppaddr_mutex);
3809 3809  
3810 3810                  hat_mempte_remap(pfn, pp_addr2, pte2,
3811 3811                      PROT_READ | PROT_WRITE | HAT_STORECACHING_OK,
3812 3812                      HAT_LOAD_NOCONSIST);
3813 3813          }
3814 3814  
3815 3815          if (use_sse_pagezero) {
3816 3816  #ifdef __xpv
3817 3817                  uint_t rem;
3818 3818  
3819 3819                  /*
3820 3820                   * zero a byte at a time until properly aligned for
3821 3821                   * block_zero_no_xmm().
3822 3822                   */
3823 3823                  while (!P2NPHASE(off, ((uint_t)BLOCKZEROALIGN)) && len-- > 0)
3824 3824                          pp_addr2[off++] = 0;
3825 3825  
3826 3826                  /*
3827 3827                   * Now use faster block_zero_no_xmm() for any range
3828 3828                   * that is properly aligned and sized.
3829 3829                   */
3830 3830                  rem = P2PHASE(len, ((uint_t)BLOCKZEROALIGN));
3831 3831                  len -= rem;
3832 3832                  if (len != 0) {
3833 3833                          block_zero_no_xmm(pp_addr2 + off, len);
3834 3834                          off += len;
3835 3835                  }
3836 3836  
3837 3837                  /*
3838 3838                   * zero remainder with byte stores.
3839 3839                   */
3840 3840                  while (rem-- > 0)
3841 3841                          pp_addr2[off++] = 0;
3842 3842  #else
3843 3843                  hwblkclr(pp_addr2 + off, len);
3844 3844  #endif
3845 3845          } else {
3846 3846                  bzero(pp_addr2 + off, len);
3847 3847          }
3848 3848  
3849 3849          if (!kpm_enable || pfn_is_foreign(pfn)) {
3850 3850  #ifdef __xpv
3851 3851                  /*
3852 3852                   * On the hypervisor this page might get used for a page
3853 3853                   * table before any intervening change to this mapping,
3854 3854                   * so blow it away.
3855 3855                   */
3856 3856                  if (HYPERVISOR_update_va_mapping((uintptr_t)pp_addr2, 0,
3857 3857                      UVMF_INVLPG) < 0)
3858 3858                          panic("HYPERVISOR_update_va_mapping() failed");
3859 3859  #endif
3860 3860                  mutex_exit(ppaddr_mutex);
3861 3861          }
3862 3862  
3863 3863          kpreempt_enable();
3864 3864  }
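
The __xpv branch of pfnzero() above leans on the P2PHASE()/P2NPHASE() alignment macros to split the [off, off + len) range into byte stores around a block_zero_no_xmm() call. A minimal standalone sketch of what those two macros compute, using their usual <sys/sysmacros.h> definitions and an assumed 64-byte alignment as a stand-in for BLOCKZEROALIGN (illustration only):

#include <stdio.h>

/* same definitions as the usual <sys/sysmacros.h> ones */
#define	P2PHASE(x, align)	((x) & ((align) - 1))
#define	P2NPHASE(x, align)	(-(x) & ((align) - 1))

int
main(void)
{
	unsigned int align = 64;	/* assumed stand-in for BLOCKZEROALIGN */
	unsigned int offs[] = { 0, 1, 63, 64, 100 };
	unsigned int i;

	for (i = 0; i < sizeof (offs) / sizeof (offs[0]); i++) {
		(void) printf("off=%3u  P2PHASE=%2u  P2NPHASE=%2u\n",
		    offs[i], P2PHASE(offs[i], align), P2NPHASE(offs[i], align));
	}
	return (0);
}

P2PHASE(x, align) is how far x sits past the previous align boundary, and P2NPHASE(x, align) is how far it is to the next one (zero when x is already aligned).
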
3865 3865  
3866 3866  /*
3867 3867   * Platform-dependent page scrub call.
3868 3868   */
3869 3869  void
3870 3870  pagescrub(page_t *pp, uint_t off, uint_t len)
3871 3871  {
3872 3872          /*
3873 3873           * For now, we rely on the fact that pagezero() will
3874 3874           * always clear UEs.
3875 3875           */
3876 3876          pagezero(pp, off, len);
3877 3877  }
3878 3878  
3879 3879  /*
3880 3880   * Set up two private addresses on a given CPU for use in ppcopy().
3881 3881   */
3882 3882  void
3883 3883  setup_vaddr_for_ppcopy(struct cpu *cpup)
3884 3884  {
3885 3885          void *addr;
3886 3886          hat_mempte_t pte_pa;
3887 3887  
3888 3888          addr = vmem_alloc(heap_arena, mmu_ptob(1), VM_SLEEP);
3889 3889          pte_pa = hat_mempte_setup(addr);
3890 3890          cpup->cpu_caddr1 = addr;
3891 3891          cpup->cpu_caddr1pte = pte_pa;
3892 3892  
3893 3893          addr = vmem_alloc(heap_arena, mmu_ptob(1), VM_SLEEP);
3894 3894          pte_pa = hat_mempte_setup(addr);
3895 3895          cpup->cpu_caddr2 = addr;
3896 3896          cpup->cpu_caddr2pte = pte_pa;
3897 3897  
3898 3898          mutex_init(&cpup->cpu_ppaddr_mutex, NULL, MUTEX_DEFAULT, NULL);
3899 3899  }
3900 3900  
3901 3901  /*
3902 3902   * Undo setup_vaddr_for_ppcopy
3903 3903   */
3904 3904  void
3905 3905  teardown_vaddr_for_ppcopy(struct cpu *cpup)
3906 3906  {
3907 3907          mutex_destroy(&cpup->cpu_ppaddr_mutex);
3908 3908  
3909 3909          hat_mempte_release(cpup->cpu_caddr2, cpup->cpu_caddr2pte);
3910 3910          cpup->cpu_caddr2pte = 0;
3911 3911          vmem_free(heap_arena, cpup->cpu_caddr2, mmu_ptob(1));
3912 3912          cpup->cpu_caddr2 = 0;
3913 3913  
3914 3914          hat_mempte_release(cpup->cpu_caddr1, cpup->cpu_caddr1pte);
3915 3915          cpup->cpu_caddr1pte = 0;
3916 3916          vmem_free(heap_arena, cpup->cpu_caddr1, mmu_ptob(1));
3917 3917          cpup->cpu_caddr1 = 0;
3918 3918  }
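
setup_vaddr_for_ppcopy() and teardown_vaddr_for_ppcopy() bracket the lifetime of each CPU's scratch mappings: the two virtual addresses, their PTE slots, and cpu_ppaddr_mutex are reserved once, and ppcopy()/pfnzero() then only remap and reuse them under the mutex on the hot path. A hypothetical user-space analogy of that "reserve once, reuse under a lock" pattern -- all names below are invented, with malloc()/pthreads standing in for vmem_alloc()/hat_mempte_setup() and kernel mutexes:

#include <pthread.h>
#include <stdlib.h>
#include <string.h>

typedef struct scratch {
	void		*sc_addr;	/* plays the role of cpu_caddr2 */
	pthread_mutex_t	sc_lock;	/* plays the role of cpu_ppaddr_mutex */
} scratch_t;

static void
scratch_setup(scratch_t *sc, size_t pagesize)
{
	sc->sc_addr = malloc(pagesize);	/* stand-in for vmem_alloc(); checks omitted */
	(void) pthread_mutex_init(&sc->sc_lock, NULL);
}

static void
scratch_teardown(scratch_t *sc)
{
	(void) pthread_mutex_destroy(&sc->sc_lock);
	free(sc->sc_addr);
	sc->sc_addr = NULL;
}

/* hot path: borrow the scratch page under the lock, as pfnzero() does */
static void
scratch_zero(scratch_t *sc, size_t off, size_t len)
{
	(void) pthread_mutex_lock(&sc->sc_lock);
	(void) memset((char *)sc->sc_addr + off, 0, len);
	(void) pthread_mutex_unlock(&sc->sc_lock);
}

int
main(void)
{
	scratch_t sc;

	scratch_setup(&sc, 4096);
	scratch_zero(&sc, 100, 1000);
	scratch_teardown(&sc);
	return (0);
}
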
3919 3919  
3920 3920  /*
3921 3921   * Function for flushing D-cache when performing module relocations
3922 3922   * to an alternate mapping.  Unnecessary on Intel / AMD platforms.
3923 3923   */
3924 3924  void
3925 3925  dcache_flushall()
3926 3926  {}
3927 3927  
3928 3928  size_t
3929 3929  exec_get_spslew(void)
3930 3930  {
3931 3931          return (0);
3932 3932  }
3933 3933  
3934 3934  /*
3935 3935   * Allocate a memory page.  The argument 'seed' can be any pseudo-random
3936 3936   * number to vary where the pages come from.  This is quite a hacked up
3937 3937   * method -- it works for now, but really needs to be fixed up a bit.
3938 3938   *
3939 3939   * We currently use page_create_va() on the kvp with a fake offset,
3940 3940   * segment, and virtual address.  This is pretty bogus, but was copied from
3941 3941   * the old hat_i86.c code.  A better approach would be to specify either
3942 3942   * mnode random or mnode local and take a page from whatever color has the
3943 3943   * MOST available -- this would have a minimal impact on page coloring.
3944 3944   */
3945 3945  page_t *
3946 3946  page_get_physical(uintptr_t seed)
3947 3947  {
3948 3948          page_t *pp;
3949 3949          u_offset_t offset;
3950 3950          static struct seg tmpseg;
3951 3951          static uintptr_t ctr = 0;
3952 3952  
3953 3953          /*
3954 3954           * This code is gross; we really need a simpler page allocator.
3955 3955           *
3956 3956           * We need to assign an offset for the page in order to call
3957 3957           * page_create_va().  To avoid conflicts with other pages, we get
3958 3958           * creative with the offset: for 32 bits, we need an offset > 4 Gig;
3959 3959           * for 64 bits, we need an offset somewhere in the VA hole.
3960 3960           */
3961 3961          offset = seed;
3962 3962          if (offset > kernelbase)
3963 3963                  offset -= kernelbase;
3964 3964          offset <<= MMU_PAGESHIFT;
3965 3965  #if defined(__amd64)
3966 3966          offset += mmu.hole_start;       /* something in VA hole */
3967 3967  #else
3968 3968          offset += 1ULL << 40;   /* something > 4 Gig */
3969 3969  #endif
3970 3970  
3971 3971          if (page_resv(1, KM_NOSLEEP) == 0)
3972 3972                  return (NULL);
3973 3973  
3974 3974  #ifdef  DEBUG
3975 3975          pp = page_exists(&kvp, offset);
3976 3976          if (pp != NULL)
3977 3977                  panic("page already exists %p", (void *)pp);
3978 3978  #endif
3979 3979  
3980 3980          pp = page_create_va(&kvp, offset, MMU_PAGESIZE, PG_EXCL,
3981 3981              &tmpseg, (caddr_t)(ctr += MMU_PAGESIZE));   /* changing VA usage */
3982 3982          if (pp != NULL) {
3983 3983                  page_io_unlock(pp);
3984 3984                  page_downgrade(pp);
3985 3985          }
3986 3986          return (pp);
3987 3987  }
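
The offset arithmetic in page_get_physical() is the whole trick: turn the seed into a kvp offset that can't conflict with the offsets of other pages. A standalone sketch of the 64-bit branch, where kernelbase, the VA-hole start, and the page shift are made-up stand-in values (the kernel takes the real ones from its own state):

#include <stdio.h>
#include <stdint.h>

int
main(void)
{
	/* assumed stand-in values, purely for illustration */
	uint64_t kernelbase = 0xfffffd8000000000ULL;	/* hypothetical kernel base */
	uint64_t hole_start = 0x0000800000000000ULL;	/* hypothetical VA hole start */
	int mmu_pageshift = 12;				/* 4K pages */

	uint64_t seed = kernelbase + 0x123456;		/* e.g. derived from a kernel VA */
	uint64_t offset = seed;

	if (offset > kernelbase)
		offset -= kernelbase;	/* keep only the distinguishing low bits */
	offset <<= mmu_pageshift;	/* give every seed its own page-sized slot */
	offset += hole_start;		/* land the offset in the VA hole */

	(void) printf("seed 0x%llx -> kvp offset 0x%llx\n",
	    (unsigned long long)seed, (unsigned long long)offset);
	return (0);
}

The 32-bit branch above does the same thing but adds 1ULL << 40 instead, pushing the offset above 4 Gig.
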
  