OS-3602 lxbrand LTP recv* tests failing on MSG_ERRQUEUE flag
OS-3600 lxbrand 32bit cannot boot with OS-3594 fix
Reviewed by: Jerry Jelinek <jerry.jelinek@joyent.com>
Reviewed by: Bryan Cantrill <bryan@joyent.com>
    
      
          --- old/usr/src/uts/i86pc/vm/vm_machdep.c
          +++ new/usr/src/uts/i86pc/vm/vm_machdep.c
   1    1  /*
   2    2   * CDDL HEADER START
   3    3   *
   4    4   * The contents of this file are subject to the terms of the
   5    5   * Common Development and Distribution License (the "License").
   6    6   * You may not use this file except in compliance with the License.
   7    7   *
   8    8   * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
   9    9   * or http://www.opensolaris.org/os/licensing.
  10   10   * See the License for the specific language governing permissions
  11   11   * and limitations under the License.
  12   12   *
  13   13   * When distributing Covered Code, include this CDDL HEADER in each
  14   14   * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
  15   15   * If applicable, add the following below this CDDL HEADER, with the
  16   16   * fields enclosed by brackets "[]" replaced with your own identifying
  17   17   * information: Portions Copyright [yyyy] [name of copyright owner]
  18   18   *
  19   19   * CDDL HEADER END
  20   20   */
  21   21  /*
  22   22   * Copyright (c) 1992, 2010, Oracle and/or its affiliates. All rights reserved.
  23   23   */
  24   24  /*
  25   25   * Copyright (c) 2010, Intel Corporation.
  26   26   * All rights reserved.
       27 + * Copyright (c) 2014, Joyent, Inc. All rights reserved.
  27   28   */
  28   29  
  29   30  /* Copyright (c) 1984, 1986, 1987, 1988, 1989 AT&T */
  30   31  /*      All Rights Reserved   */
  31   32  
  32   33  /*
  33   34   * Portions of this source code were derived from Berkeley 4.3 BSD
  34   35   * under license from the Regents of the University of California.
  35   36   */
  36   37  
  37   38  /*
  38   39   * UNIX machine dependent virtual memory support.
  39   40   */
  40   41  
  41   42  #include <sys/types.h>
  42   43  #include <sys/param.h>
  43   44  #include <sys/systm.h>
  44   45  #include <sys/user.h>
  45   46  #include <sys/proc.h>
  46   47  #include <sys/kmem.h>
  47   48  #include <sys/vmem.h>
  48   49  #include <sys/buf.h>
  49   50  #include <sys/cpuvar.h>
  50   51  #include <sys/lgrp.h>
  51   52  #include <sys/disp.h>
  52   53  #include <sys/vm.h>
  53   54  #include <sys/mman.h>
  54   55  #include <sys/vnode.h>
  55   56  #include <sys/cred.h>
  56   57  #include <sys/exec.h>
  57   58  #include <sys/exechdr.h>
  58   59  #include <sys/debug.h>
  59   60  #include <sys/vmsystm.h>
  60   61  #include <sys/swap.h>
  61   62  #include <sys/dumphdr.h>
  62   63  
  63   64  #include <vm/hat.h>
  64   65  #include <vm/as.h>
  65   66  #include <vm/seg.h>
  66   67  #include <vm/seg_kp.h>
  67   68  #include <vm/seg_vn.h>
  68   69  #include <vm/page.h>
  69   70  #include <vm/seg_kmem.h>
  70   71  #include <vm/seg_kpm.h>
  71   72  #include <vm/vm_dep.h>
  72   73  
  73   74  #include <sys/cpu.h>
  74   75  #include <sys/vm_machparam.h>
  75   76  #include <sys/memlist.h>
  76   77  #include <sys/bootconf.h> /* XXX the memlist stuff belongs in memlist_plat.h */
  77   78  #include <vm/hat_i86.h>
  78   79  #include <sys/x86_archext.h>
  79   80  #include <sys/elf_386.h>
  80   81  #include <sys/cmn_err.h>
  81   82  #include <sys/archsystm.h>
  82   83  #include <sys/machsystm.h>
  83   84  
  84   85  #include <sys/vtrace.h>
  85   86  #include <sys/ddidmareq.h>
  86   87  #include <sys/promif.h>
  87   88  #include <sys/memnode.h>
  88   89  #include <sys/stack.h>
  89   90  #include <util/qsort.h>
  90   91  #include <sys/taskq.h>
  91   92  
  92   93  #ifdef __xpv
  93   94  
  94   95  #include <sys/hypervisor.h>
  95   96  #include <sys/xen_mmu.h>
  96   97  #include <sys/balloon_impl.h>
  97   98  
  98   99  /*
  99  100   * domain 0 pages usable for DMA are kept pre-allocated and kept in
 100  101   * distinct lists, ordered by increasing mfn.
 101  102   */
 102  103  static kmutex_t io_pool_lock;
 103  104  static kmutex_t contig_list_lock;
 104  105  static page_t *io_pool_4g;      /* pool for 32 bit dma limited devices */
 105  106  static page_t *io_pool_16m;     /* pool for 24 bit dma limited legacy devices */
 106  107  static long io_pool_cnt;
 107  108  static long io_pool_cnt_max = 0;
 108  109  #define DEFAULT_IO_POOL_MIN     128
 109  110  static long io_pool_cnt_min = DEFAULT_IO_POOL_MIN;
 110  111  static long io_pool_cnt_lowater = 0;
 111  112  static long io_pool_shrink_attempts; /* how many times did we try to shrink */
 112  113  static long io_pool_shrinks;    /* how many times did we really shrink */
 113  114  static long io_pool_grows;      /* how many times did we grow */
 114  115  static mfn_t start_mfn = 1;
 115  116  static caddr_t io_pool_kva;     /* use to alloc pages when needed */
 116  117  
 117  118  static int create_contig_pfnlist(uint_t);
 118  119  
 119  120  /*
 120  121   * percentage of phys mem to hold in the i/o pool
 121  122   */
 122  123  #define DEFAULT_IO_POOL_PCT     2
 123  124  static long io_pool_physmem_pct = DEFAULT_IO_POOL_PCT;
 124  125  static void page_io_pool_sub(page_t **, page_t *, page_t *);
 125  126  int ioalloc_dbg = 0;
 126  127  
 127  128  #endif /* __xpv */
 128  129  
 129  130  uint_t vac_colors = 1;
 130  131  
 131  132  int largepagesupport = 0;
 132  133  extern uint_t page_create_new;
 133  134  extern uint_t page_create_exists;
 134  135  extern uint_t page_create_putbacks;
 135  136  /*
 136  137   * Allow users to disable the kernel's use of SSE.
 137  138   */
 138  139  extern int use_sse_pagecopy, use_sse_pagezero;
 139  140  
 140  141  /*
 141  142   * combined memory ranges from mnode and memranges[] to manage single
 142  143   * mnode/mtype dimension in the page lists.
 143  144   */
 144  145  typedef struct {
 145  146          pfn_t   mnr_pfnlo;
 146  147          pfn_t   mnr_pfnhi;
 147  148          int     mnr_mnode;
 148  149          int     mnr_memrange;           /* index into memranges[] */
 149  150          int     mnr_next;               /* next lower PA mnoderange */
 150  151          int     mnr_exists;
 151  152          /* maintain page list stats */
 152  153          pgcnt_t mnr_mt_clpgcnt;         /* cache list cnt */
 153  154          pgcnt_t mnr_mt_flpgcnt[MMU_PAGE_SIZES]; /* free list cnt per szc */
 154  155          pgcnt_t mnr_mt_totcnt;          /* sum of cache and free lists */
 155  156  #ifdef DEBUG
 156  157          struct mnr_mts {                /* mnode/mtype szc stats */
 157  158                  pgcnt_t mnr_mts_pgcnt;
 158  159                  int     mnr_mts_colors;
 159  160                  pgcnt_t *mnr_mtsc_pgcnt;
 160  161          }       *mnr_mts;
 161  162  #endif
 162  163  } mnoderange_t;
 163  164  
 164  165  #define MEMRANGEHI(mtype)                                               \
 165  166          ((mtype > 0) ? memranges[mtype - 1] - 1: physmax)
 166  167  #define MEMRANGELO(mtype)       (memranges[mtype])
 167  168  
 168  169  #define MTYPE_FREEMEM(mt)       (mnoderanges[mt].mnr_mt_totcnt)
 169  170  
 170  171  /*
 171  172   * As the PC architecture evolved memory up was clumped into several
 172  173   * ranges for various historical I/O devices to do DMA.
 173  174   * < 16Meg - ISA bus
 174  175   * < 2Gig - ???
 175  176   * < 4Gig - PCI bus or drivers that don't understand PAE mode
 176  177   *
 177  178   * These are listed in reverse order, so that we can skip over unused
 178  179   * ranges on machines with small memories.
 179  180   *
 180  181   * For now under the Hypervisor, we'll only ever have one memrange.
 181  182   */
 182  183  #define PFN_4GIG        0x100000
 183  184  #define PFN_16MEG       0x1000
 184  185  /* Indices into the memory range (arch_memranges) array. */
 185  186  #define MRI_4G          0
 186  187  #define MRI_2G          1
 187  188  #define MRI_16M         2
 188  189  #define MRI_0           3
 189  190  static pfn_t arch_memranges[NUM_MEM_RANGES] = {
 190  191      PFN_4GIG,   /* pfn range for 4G and above */
 191  192      0x80000,    /* pfn range for 2G-4G */
 192  193      PFN_16MEG,  /* pfn range for 16M-2G */
 193  194      0x00000,    /* pfn range for 0-16M */
 194  195  };
 195  196  pfn_t *memranges = &arch_memranges[0];
 196  197  int nranges = NUM_MEM_RANGES;
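
The table above is ordered from highest to lowest base pfn, so classifying a pfn is a linear scan that stops at the first entry whose base does not exceed it (memrange_num() further down in this file does exactly this). A stand-alone, user-space sketch of that lookup, using hypothetical sample pfns and plain unsigned long in place of pfn_t:

#include <stdio.h>

/* Illustrative copies of the constants above. */
#define NUM_MEM_RANGES  4
#define PFN_4GIG        0x100000
#define PFN_16MEG       0x1000

static unsigned long ex_memranges[NUM_MEM_RANGES] = {
        PFN_4GIG,       /* MRI_4G:  4G and above */
        0x80000,        /* MRI_2G:  2G-4G */
        PFN_16MEG,      /* MRI_16M: 16M-2G */
        0x00000,        /* MRI_0:   0-16M */
};

/* The first range whose base pfn is <= the given pfn wins. */
static int
ex_memrange_num(unsigned long pfn)
{
        int n;

        for (n = 0; n < NUM_MEM_RANGES - 1; ++n) {
                if (pfn >= ex_memranges[n])
                        break;
        }
        return (n);
}

int
main(void)
{
        /* Hypothetical pfns for pages at roughly 8G, 3G, 64M and 4M. */
        unsigned long pfns[] = { 0x200000, 0xc0000, 0x4000, 0x400 };
        int i;

        for (i = 0; i < 4; i++)
                printf("pfn 0x%lx -> memrange index %d\n", pfns[i],
                    ex_memrange_num(pfns[i]));
        return (0);
}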
 197  198  
 198  199  /*
 199  200   * This combines mem_node_config and memranges into one data
 200  201   * structure to be used for page list management.
 201  202   */
 202  203  mnoderange_t    *mnoderanges;
 203  204  int             mnoderangecnt;
 204  205  int             mtype4g;
 205  206  int             mtype16m;
 206  207  int             mtypetop;       /* index of highest pfn'ed mnoderange */
 207  208  
 208  209  /*
 209  210   * 4g memory management variables for systems with more than 4g of memory:
 210  211   *
 211  212   * physical memory below 4g is required for 32bit dma devices and, currently,
 212  213   * for kmem memory. On systems with more than 4g of memory, the pool of memory
 213  214   * below 4g can be depleted without any paging activity given that there is
 214  215   * likely to be sufficient memory above 4g.
 215  216   *
 216  217   * physmax4g is set true if the largest pfn is over 4g. The rest of the
 217  218   * 4g memory management code is enabled only when physmax4g is true.
 218  219   *
 219  220   * maxmem4g is the count of the maximum number of pages on the page lists
 220  221   * with physical addresses below 4g. It can be a lot less then 4g given that
 221  222   * BIOS may reserve large chunks of space below 4g for hot plug pci devices,
 222  223   * agp aperture etc.
 223  224   *
 224  225   * freemem4g maintains the count of the number of available pages on the
 225  226   * page lists with physical addresses below 4g.
 226  227   *
 227  228   * DESFREE4G specifies the desired amount of below 4g memory. It defaults to
 228  229   * 6% (desfree4gshift = 4) of maxmem4g.
 229  230   *
 230  231   * RESTRICT4G_ALLOC returns true if freemem4g falls below DESFREE4G
 231  232   * and the amount of physical memory above 4g is greater than freemem4g.
 232  233   * In this case, page_get_* routines will restrict below 4g allocations
 233  234   * for requests that don't specifically require it.
 234  235   */
 235  236  
 236  237  #define DESFREE4G       (maxmem4g >> desfree4gshift)
 237  238  
 238  239  #define RESTRICT4G_ALLOC                                        \
 239  240          (physmax4g && (freemem4g < DESFREE4G) && ((freemem4g << 1) < freemem))
 240  241  
 241  242  static pgcnt_t  maxmem4g;
 242  243  static pgcnt_t  freemem4g;
 243  244  static int      physmax4g;
 244  245  static int      desfree4gshift = 4;     /* maxmem4g shift to derive DESFREE4G */
 245  246  
 246  247  /*
 247  248   * 16m memory management:
 248  249   *
 249  250   * reserve some amount of physical memory below 16m for legacy devices.
 250  251   *
 251  252   * RESTRICT16M_ALLOC returns true if an there are sufficient free pages above
 252  253   * 16m or if the 16m pool drops below DESFREE16M.
 253  254   *
 254  255   * In this case, general page allocations via page_get_{free,cache}list
 255  256   * routines will be restricted from allocating from the 16m pool. Allocations
 256  257   * that require specific pfn ranges (page_get_anylist) and PG_PANIC allocations
 257  258   * are not restricted.
 258  259   */
 259  260  
 260  261  #define FREEMEM16M      MTYPE_FREEMEM(mtype16m)
 261  262  #define DESFREE16M      desfree16m
 262  263  #define RESTRICT16M_ALLOC(freemem, pgcnt, flags)                \
 263  264          ((freemem != 0) && ((flags & PG_PANIC) == 0) &&         \
 264  265              ((freemem >= (FREEMEM16M)) ||                       \
 265  266              (FREEMEM16M  < (DESFREE16M + pgcnt))))
 266  267  
 267  268  static pgcnt_t  desfree16m = 0x380;
 268  269  
 269  270  /*
 270  271   * This can be patched via /etc/system to allow old non-PAE aware device
 271  272   * drivers to use kmem_alloc'd memory on 32 bit systems with > 4Gig RAM.
 272  273   */
 273  274  int restricted_kmemalloc = 0;
 274  275  
 275  276  #ifdef VM_STATS
 276  277  struct {
 277  278          ulong_t pga_alloc;
 278  279          ulong_t pga_notfullrange;
 279  280          ulong_t pga_nulldmaattr;
 280  281          ulong_t pga_allocok;
 281  282          ulong_t pga_allocfailed;
 282  283          ulong_t pgma_alloc;
 283  284          ulong_t pgma_allocok;
 284  285          ulong_t pgma_allocfailed;
 285  286          ulong_t pgma_allocempty;
 286  287  } pga_vmstats;
 287  288  #endif
 288  289  
 289  290  uint_t mmu_page_sizes;
 290  291  
 291  292  /* How many page sizes the users can see */
 292  293  uint_t mmu_exported_page_sizes;
 293  294  
 294  295  /* page sizes that legacy applications can see */
 295  296  uint_t mmu_legacy_page_sizes;
 296  297  
 297  298  /*
 298  299   * Number of pages in 1 GB.  Don't enable automatic large pages if we have
 299  300   * fewer than this many pages.
 300  301   */
 301  302  pgcnt_t shm_lpg_min_physmem = 1 << (30 - MMU_PAGESHIFT);
 302  303  pgcnt_t privm_lpg_min_physmem = 1 << (30 - MMU_PAGESHIFT);
 303  304  
 304  305  /*
 305  306   * Maximum and default segment size tunables for user private
 306  307   * and shared anon memory, and user text and initialized data.
 307  308   * These can be patched via /etc/system to allow large pages
 308  309   * to be used for mapping application private and shared anon memory.
 309  310   */
 310  311  size_t mcntl0_lpsize = MMU_PAGESIZE;
 311  312  size_t max_uheap_lpsize = MMU_PAGESIZE;
 312  313  size_t default_uheap_lpsize = MMU_PAGESIZE;
 313  314  size_t max_ustack_lpsize = MMU_PAGESIZE;
 314  315  size_t default_ustack_lpsize = MMU_PAGESIZE;
 315  316  size_t max_privmap_lpsize = MMU_PAGESIZE;
 316  317  size_t max_uidata_lpsize = MMU_PAGESIZE;
 317  318  size_t max_utext_lpsize = MMU_PAGESIZE;
 318  319  size_t max_shm_lpsize = MMU_PAGESIZE;
 319  320  
 320  321  
 321  322  /*
 322  323   * initialized by page_coloring_init().
 323  324   */
 324  325  uint_t  page_colors;
 325  326  uint_t  page_colors_mask;
 326  327  uint_t  page_coloring_shift;
 327  328  int     cpu_page_colors;
 328  329  static uint_t   l2_colors;
 329  330  
 330  331  /*
 331  332   * Page freelists and cachelists are dynamically allocated once mnoderangecnt
 332  333   * and page_colors are calculated from the l2 cache n-way set size.  Within a
 333  334   * mnode range, the page freelist and cachelist are hashed into bins based on
 334  335   * color. This makes it easier to search for a page within a specific memory
 335  336   * range.
 336  337   */
 337  338  #define PAGE_COLORS_MIN 16
 338  339  
 339  340  page_t ****page_freelists;
 340  341  page_t ***page_cachelists;
 341  342  
 342  343  
 343  344  /*
 344  345   * Used by page layer to know about page sizes
 345  346   */
 346  347  hw_pagesize_t hw_page_array[MAX_NUM_LEVEL + 1];
 347  348  
 348  349  kmutex_t        *fpc_mutex[NPC_MUTEX];
 349  350  kmutex_t        *cpc_mutex[NPC_MUTEX];
 350  351  
 351  352  /* Lock to protect mnoderanges array for memory DR operations. */
 352  353  static kmutex_t mnoderange_lock;
 353  354  
 354  355  /*
 355  356   * Only let one thread at a time try to coalesce large pages, to
 356  357   * prevent them from working against each other.
 357  358   */
 358  359  static kmutex_t contig_lock;
 359  360  #define CONTIG_LOCK()   mutex_enter(&contig_lock);
 360  361  #define CONTIG_UNLOCK() mutex_exit(&contig_lock);
 361  362  
 362  363  #define PFN_16M         (mmu_btop((uint64_t)0x1000000))
 363  364  
 364  365  /*
 365  366   * Return the optimum page size for a given mapping
 366  367   */
 367  368  /*ARGSUSED*/
 368  369  size_t
 369  370  map_pgsz(int maptype, struct proc *p, caddr_t addr, size_t len, int memcntl)
 370  371  {
 371  372          level_t l = 0;
 372  373          size_t pgsz = MMU_PAGESIZE;
 373  374          size_t max_lpsize;
 374  375          uint_t mszc;
 375  376  
 376  377          ASSERT(maptype != MAPPGSZ_VA);
 377  378  
 378  379          if (maptype != MAPPGSZ_ISM && physmem < privm_lpg_min_physmem) {
 379  380                  return (MMU_PAGESIZE);
 380  381          }
 381  382  
 382  383          switch (maptype) {
 383  384          case MAPPGSZ_HEAP:
 384  385          case MAPPGSZ_STK:
 385  386                  max_lpsize = memcntl ? mcntl0_lpsize : (maptype ==
 386  387                      MAPPGSZ_HEAP ? max_uheap_lpsize : max_ustack_lpsize);
 387  388                  if (max_lpsize == MMU_PAGESIZE) {
 388  389                          return (MMU_PAGESIZE);
 389  390                  }
 390  391                  if (len == 0) {
 391  392                          len = (maptype == MAPPGSZ_HEAP) ? p->p_brkbase +
 392  393                              p->p_brksize - p->p_bssbase : p->p_stksize;
 393  394                  }
 394  395                  len = (maptype == MAPPGSZ_HEAP) ? MAX(len,
 395  396                      default_uheap_lpsize) : MAX(len, default_ustack_lpsize);
 396  397  
 397  398                  /*
 398  399                   * use the pages size that best fits len
 399  400                   */
 400  401                  for (l = mmu.umax_page_level; l > 0; --l) {
 401  402                          if (LEVEL_SIZE(l) > max_lpsize || len < LEVEL_SIZE(l)) {
 402  403                                  continue;
 403  404                          } else {
 404  405                                  pgsz = LEVEL_SIZE(l);
 405  406                          }
 406  407                          break;
 407  408                  }
 408  409  
 409  410                  mszc = (maptype == MAPPGSZ_HEAP ? p->p_brkpageszc :
 410  411                      p->p_stkpageszc);
 411  412                  if (addr == 0 && (pgsz < hw_page_array[mszc].hp_size)) {
 412  413                          pgsz = hw_page_array[mszc].hp_size;
 413  414                  }
 414  415                  return (pgsz);
 415  416  
 416  417          case MAPPGSZ_ISM:
 417  418                  for (l = mmu.umax_page_level; l > 0; --l) {
 418  419                          if (len >= LEVEL_SIZE(l))
 419  420                                  return (LEVEL_SIZE(l));
 420  421                  }
 421  422                  return (LEVEL_SIZE(0));
 422  423          }
 423  424          return (pgsz);
 424  425  }
 425  426  
 426  427  static uint_t
 427  428  map_szcvec(caddr_t addr, size_t size, uintptr_t off, size_t max_lpsize,
 428  429      size_t min_physmem)
 429  430  {
 430  431          caddr_t eaddr = addr + size;
 431  432          uint_t szcvec = 0;
 432  433          caddr_t raddr;
 433  434          caddr_t readdr;
 434  435          size_t  pgsz;
 435  436          int i;
 436  437  
 437  438          if (physmem < min_physmem || max_lpsize <= MMU_PAGESIZE) {
 438  439                  return (0);
 439  440          }
 440  441  
 441  442          for (i = mmu_exported_page_sizes - 1; i > 0; i--) {
 442  443                  pgsz = page_get_pagesize(i);
 443  444                  if (pgsz > max_lpsize) {
 444  445                          continue;
 445  446                  }
 446  447                  raddr = (caddr_t)P2ROUNDUP((uintptr_t)addr, pgsz);
 447  448                  readdr = (caddr_t)P2ALIGN((uintptr_t)eaddr, pgsz);
 448  449                  if (raddr < addr || raddr >= readdr) {
 449  450                          continue;
 450  451                  }
 451  452                  if (P2PHASE((uintptr_t)addr ^ off, pgsz)) {
 452  453                          continue;
 453  454                  }
 454  455                  /*
 455  456                   * Set szcvec to the remaining page sizes.
 456  457                   */
 457  458                  szcvec = ((1 << (i + 1)) - 1) & ~1;
 458  459                  break;
 459  460          }
 460  461          return (szcvec);
 461  462  }
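
To make the returned bit vector concrete: once the loop above settles on the largest usable size code i, szcvec has one bit set for every size code from 1 through i, and bit 0 (the base page size) is always cleared. A tiny user-space sketch, with the example size codes purely illustrative:

#include <stdio.h>

/* Build the size-code bit vector the same way map_szcvec() does above. */
static unsigned int
ex_szcvec(int i)
{
        return (((1U << (i + 1)) - 1) & ~1U);
}

int
main(void)
{
        int i;

        /* e.g. a system exporting size codes 0 (4K), 1 (2M) and 2 (1G) */
        for (i = 0; i <= 2; i++)
                printf("largest usable szc %d -> szcvec 0x%x\n",
                    i, ex_szcvec(i));
        return (0);
}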
 462  463  
 463  464  /*
 464  465   * Return a bit vector of large page size codes that
 465  466   * can be used to map [addr, addr + len) region.
 466  467   */
 467  468  /*ARGSUSED*/
 468  469  uint_t
 469  470  map_pgszcvec(caddr_t addr, size_t size, uintptr_t off, int flags, int type,
 470  471      int memcntl)
 471  472  {
 472  473          size_t max_lpsize = mcntl0_lpsize;
 473  474  
 474  475          if (mmu.max_page_level == 0)
 475  476                  return (0);
 476  477  
 477  478          if (flags & MAP_TEXT) {
 478  479                  if (!memcntl)
 479  480                          max_lpsize = max_utext_lpsize;
 480  481                  return (map_szcvec(addr, size, off, max_lpsize,
 481  482                      shm_lpg_min_physmem));
 482  483  
 483  484          } else if (flags & MAP_INITDATA) {
 484  485                  if (!memcntl)
 485  486                          max_lpsize = max_uidata_lpsize;
 486  487                  return (map_szcvec(addr, size, off, max_lpsize,
 487  488                      privm_lpg_min_physmem));
 488  489  
 489  490          } else if (type == MAPPGSZC_SHM) {
 490  491                  if (!memcntl)
 491  492                          max_lpsize = max_shm_lpsize;
 492  493                  return (map_szcvec(addr, size, off, max_lpsize,
 493  494                      shm_lpg_min_physmem));
 494  495  
 495  496          } else if (type == MAPPGSZC_HEAP) {
 496  497                  if (!memcntl)
 497  498                          max_lpsize = max_uheap_lpsize;
 498  499                  return (map_szcvec(addr, size, off, max_lpsize,
 499  500                      privm_lpg_min_physmem));
 500  501  
 501  502          } else if (type == MAPPGSZC_STACK) {
 502  503                  if (!memcntl)
 503  504                          max_lpsize = max_ustack_lpsize;
 504  505                  return (map_szcvec(addr, size, off, max_lpsize,
 505  506                      privm_lpg_min_physmem));
 506  507  
 507  508          } else {
 508  509                  if (!memcntl)
 509  510                          max_lpsize = max_privmap_lpsize;
 510  511                  return (map_szcvec(addr, size, off, max_lpsize,
 511  512                      privm_lpg_min_physmem));
 512  513          }
 513  514  }
 514  515  
 515  516  /*
 516  517   * Handle a pagefault.
 517  518   */
 518  519  faultcode_t
 519  520  pagefault(
 520  521          caddr_t addr,
 521  522          enum fault_type type,
 522  523          enum seg_rw rw,
 523  524          int iskernel)
 524  525  {
 525  526          struct as *as;
 526  527          struct hat *hat;
 527  528          struct proc *p;
 528  529          kthread_t *t;
 529  530          faultcode_t res;
 530  531          caddr_t base;
 531  532          size_t len;
 532  533          int err;
 533  534          int mapped_red;
 534  535          uintptr_t ea;
 535  536  
 536  537          ASSERT_STACK_ALIGNED();
 537  538  
 538  539          if (INVALID_VADDR(addr))
 539  540                  return (FC_NOMAP);
 540  541  
 541  542          mapped_red = segkp_map_red();
 542  543  
 543  544          if (iskernel) {
 544  545                  as = &kas;
 545  546                  hat = as->a_hat;
 546  547          } else {
 547  548                  t = curthread;
 548  549                  p = ttoproc(t);
 549  550                  as = p->p_as;
 550  551                  hat = as->a_hat;
 551  552          }
 552  553  
 553  554          /*
 554  555           * Dispatch pagefault.
 555  556           */
 556  557          res = as_fault(hat, as, addr, 1, type, rw);
 557  558  
 558  559          /*
 559  560           * If this isn't a potential unmapped hole in the user's
 560  561           * UNIX data or stack segments, just return status info.
 561  562           */
 562  563          if (res != FC_NOMAP || iskernel)
 563  564                  goto out;
 564  565  
 565  566          /*
 566  567           * Check to see if we happened to faulted on a currently unmapped
 567  568           * part of the UNIX data or stack segments.  If so, create a zfod
 568  569           * mapping there and then try calling the fault routine again.
 569  570           */
 570  571          base = p->p_brkbase;
 571  572          len = p->p_brksize;
 572  573  
 573  574          if (addr < base || addr >= base + len) {                /* data seg? */
 574  575                  base = (caddr_t)p->p_usrstack - p->p_stksize;
 575  576                  len = p->p_stksize;
 576  577                  if (addr < base || addr >= p->p_usrstack) {     /* stack seg? */
 577  578                          /* not in either UNIX data or stack segments */
 578  579                          res = FC_NOMAP;
 579  580                          goto out;
 580  581                  }
 581  582          }
 582  583  
 583  584          /*
 584  585           * the rest of this function implements a 3.X 4.X 5.X compatibility
 585  586           * This code is probably not needed anymore
 586  587           */
 587  588          if (p->p_model == DATAMODEL_ILP32) {
 588  589  
 589  590                  /* expand the gap to the page boundaries on each side */
 590  591                  ea = P2ROUNDUP((uintptr_t)base + len, MMU_PAGESIZE);
 591  592                  base = (caddr_t)P2ALIGN((uintptr_t)base, MMU_PAGESIZE);
 592  593                  len = ea - (uintptr_t)base;
 593  594  
 594  595                  as_rangelock(as);
 595  596                  if (as_gap(as, MMU_PAGESIZE, &base, &len, AH_CONTAIN, addr) ==
 596  597                      0) {
 597  598                          err = as_map(as, base, len, segvn_create, zfod_argsp);
 598  599                          as_rangeunlock(as);
 599  600                          if (err) {
 600  601                                  res = FC_MAKE_ERR(err);
 601  602                                  goto out;
 602  603                          }
 603  604                  } else {
 604  605                          /*
 605  606                           * This page is already mapped by another thread after
 606  607                           * we returned from as_fault() above.  We just fall
 607  608                           * through as_fault() below.
 608  609                           */
 609  610                          as_rangeunlock(as);
 610  611                  }
 611  612  
 612  613                  res = as_fault(hat, as, addr, 1, F_INVAL, rw);
 613  614          }
 614  615  
 615  616  out:
 616  617          if (mapped_red)
 617  618                  segkp_unmap_red();
 618  619  
 619  620          return (res);
 620  621  }
 621  622  
 622  623  void
 623  624  map_addr(caddr_t *addrp, size_t len, offset_t off, int vacalign, uint_t flags)
 624  625  {
 625  626          struct proc *p = curproc;
 626      -        caddr_t userlimit = (flags & _MAP_LOW32) ?
 627      -            (caddr_t)_userlimit32 : p->p_as->a_userlimit;
 628      -
 629      -        map_addr_proc(addrp, len, off, vacalign, userlimit, curproc, flags);
      627 +        map_addr_proc(addrp, len, off, vacalign,
      628 +            map_userlimit(p, p->p_as, flags), curproc, flags);
 630  629  }
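
The change above folds the open-coded _MAP_LOW32 test into a new map_userlimit() helper; its definition lives elsewhere in this patch and is not shown in this file. Purely to illustrate the shape of the call, here is a user-space sketch that reproduces only the logic being removed. The flag bit and limit values are hypothetical stand-ins, and the real helper may well do more than this (it takes the proc pointer, which this sketch ignores):

#include <stdio.h>

typedef char *caddr_t;

/* Hypothetical stand-ins for the kernel symbols referenced above. */
#define _MAP_LOW32      0x1000                  /* illustrative value only */
static caddr_t _userlimit32 = (caddr_t)0xfefff000UL;

struct as { caddr_t a_userlimit; };
struct proc { struct as *p_as; };

/*
 * The same selection the removed lines made: cap at the 32-bit limit
 * when _MAP_LOW32 is set, otherwise use the address space's own limit.
 */
static caddr_t
map_userlimit(struct proc *p, struct as *as, unsigned int flags)
{
        (void) p;       /* unused in this simplified sketch */
        return ((flags & _MAP_LOW32) ? _userlimit32 : as->a_userlimit);
}

int
main(void)
{
        /* assumes an LP64 build; the limit below is only a sample value */
        struct as as = { (caddr_t)0xfffffd7fffe00000UL };
        struct proc p = { &as };

        printf("limit with _MAP_LOW32: %p\n",
            (void *)map_userlimit(&p, p.p_as, _MAP_LOW32));
        printf("limit without:         %p\n",
            (void *)map_userlimit(&p, p.p_as, 0));
        return (0);
}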
 631  630  
 632  631  /*ARGSUSED*/
 633  632  int
 634  633  map_addr_vacalign_check(caddr_t addr, u_offset_t off)
 635  634  {
 636  635          return (0);
 637  636  }
 638  637  
 639  638  /*
 640  639   * map_addr_proc() is the routine called when the system is to
 641  640   * choose an address for the user.  We will pick an address
 642  641   * range which is the highest available below userlimit.
 643  642   *
 644  643   * Every mapping will have a redzone of a single page on either side of
 645  644   * the request. This is done to leave one page unmapped between segments.
 646  645   * This is not required, but it's useful for the user because if their
 647  646   * program strays across a segment boundary, it will catch a fault
 648  647   * immediately making debugging a little easier.  Currently the redzone
 649  648   * is mandatory.
 650  649   *
 651  650   * addrp is a value/result parameter.
 652  651   *      On input it is a hint from the user to be used in a completely
 653  652   *      machine dependent fashion.  We decide to completely ignore this hint.
 654  653   *      If MAP_ALIGN was specified, addrp contains the minimal alignment, which
 655  654   *      must be some "power of two" multiple of pagesize.
 656  655   *
 657  656   *      On output it is NULL if no address can be found in the current
 658  657   *      processes address space or else an address that is currently
 659  658   *      not mapped for len bytes with a page of red zone on either side.
 660  659   *
 661  660   *      vacalign is not needed on x86 (it's for viturally addressed caches)
 662  661   */
 663  662  /*ARGSUSED*/
 664  663  void
 665  664  map_addr_proc(
 666  665          caddr_t *addrp,
 667  666          size_t len,
 668  667          offset_t off,
 669  668          int vacalign,
 670  669          caddr_t userlimit,
 671  670          struct proc *p,
 672  671          uint_t flags)
 673  672  {
 674  673          struct as *as = p->p_as;
 675  674          caddr_t addr;
 676  675          caddr_t base;
 677  676          size_t slen;
 678  677          size_t align_amount;
 679  678  
 680  679          ASSERT32(userlimit == as->a_userlimit);
 681  680  
 682  681          base = p->p_brkbase;
 683  682  #if defined(__amd64)
 684  683          /*
 685  684           * XX64 Yes, this needs more work.
 686  685           */
 687  686          if (p->p_model == DATAMODEL_NATIVE) {
 688  687                  if (userlimit < as->a_userlimit) {
 689  688                          /*
 690  689                           * This happens when a program wants to map
 691  690                           * something in a range that's accessible to a
 692  691                           * program in a smaller address space.  For example,
 693  692                           * a 64-bit program calling mmap32(2) to guarantee
 694  693                           * that the returned address is below 4Gbytes.
 695  694                           */
 696  695                          ASSERT((uintptr_t)userlimit < ADDRESS_C(0xffffffff));
 697  696  
 698  697                          if (userlimit > base)
 699  698                                  slen = userlimit - base;
 700  699                          else {
 701  700                                  *addrp = NULL;
 702  701                                  return;
 703  702                          }
 704  703                  } else {
 705  704                          /*
 706  705                           * XX64 This layout is probably wrong .. but in
 707  706                           * the event we make the amd64 address space look
 708  707                           * like sparcv9 i.e. with the stack -above- the
 709  708                           * heap, this bit of code might even be correct.
 710  709                           */
 711  710                          slen = p->p_usrstack - base -
 712  711                              ((p->p_stk_ctl + PAGEOFFSET) & PAGEMASK);
 713  712                  }
 714  713          } else
 715  714  #endif
 716  715                  slen = userlimit - base;
 717  716  
 718  717          /* Make len be a multiple of PAGESIZE */
 719  718          len = (len + PAGEOFFSET) & PAGEMASK;
 720  719  
 721  720          /*
 722  721           * figure out what the alignment should be
 723  722           *
 724  723           * XX64 -- is there an ELF_AMD64_MAXPGSZ or is it the same????
 725  724           */
 726  725          if (len <= ELF_386_MAXPGSZ) {
 727  726                  /*
 728  727                   * Align virtual addresses to ensure that ELF shared libraries
 729  728                   * are mapped with the appropriate alignment constraints by
 730  729                   * the run-time linker.
 731  730                   */
 732  731                  align_amount = ELF_386_MAXPGSZ;
 733  732          } else {
 734  733                  /*
 735  734                   * For 32-bit processes, only those which have specified
 736  735                   * MAP_ALIGN and an addr will be aligned on a larger page size.
 737  736                   * Not doing so can potentially waste up to 1G of process
 738  737                   * address space.
 739  738                   */
 740  739                  int lvl = (p->p_model == DATAMODEL_ILP32) ? 1 :
 741  740                      mmu.umax_page_level;
 742  741  
 743  742                  while (lvl && len < LEVEL_SIZE(lvl))
 744  743                          --lvl;
 745  744  
 746  745                  align_amount = LEVEL_SIZE(lvl);
 747  746          }
 748  747          if ((flags & MAP_ALIGN) && ((uintptr_t)*addrp > align_amount))
 749  748                  align_amount = (uintptr_t)*addrp;
 750  749  
 751  750          ASSERT(ISP2(align_amount));
 752  751          ASSERT(align_amount == 0 || align_amount >= PAGESIZE);
 753  752  
 754  753          off = off & (align_amount - 1);
 755  754          /*
 756  755           * Look for a large enough hole starting below userlimit.
 757  756           * After finding it, use the upper part.
 758  757           */
 759  758          if (as_gap_aligned(as, len, &base, &slen, AH_HI, NULL, align_amount,
 760  759              PAGESIZE, off) == 0) {
 761  760                  caddr_t as_addr;
 762  761  
 763  762                  /*
 764  763                   * addr is the highest possible address to use since we have
 765  764                   * a PAGESIZE redzone at the beginning and end.
 766  765                   */
 767  766                  addr = base + slen - (PAGESIZE + len);
 768  767                  as_addr = addr;
 769  768                  /*
 770  769                   * Round address DOWN to the alignment amount and
 771  770                   * add the offset in.
 772  771                   * If addr is greater than as_addr, len would not be large
 773  772                   * enough to include the redzone, so we must adjust down
 774  773                   * by the alignment amount.
 775  774                   */
 776  775                  addr = (caddr_t)((uintptr_t)addr & (~(align_amount - 1)));
 777  776                  addr += (uintptr_t)off;
 778  777                  if (addr > as_addr) {
 779  778                          addr -= align_amount;
 780  779                  }
 781  780  
 782  781                  ASSERT(addr > base);
 783  782                  ASSERT(addr + len < base + slen);
 784  783                  ASSERT(((uintptr_t)addr & (align_amount - 1)) ==
 785  784                      ((uintptr_t)(off)));
 786  785                  *addrp = addr;
 787  786          } else {
 788  787                  *addrp = NULL;  /* no more virtual space */
 789  788          }
 790  789  }
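
The placement arithmetic above (take the top of the hole, leave a PAGESIZE redzone, round the address down to the alignment, add the offset, and back off one alignment unit if the rounding overshot) is easy to check in isolation. A user-space sketch with hypothetical hole, length and alignment values, assuming an LP64 build:

#include <stdio.h>
#include <stdint.h>

#define EX_PAGESIZE     4096UL

/*
 * Place a len-byte mapping at the top of the hole [base, base + slen),
 * keeping a one-page redzone above it and honoring align/off, the same
 * way map_addr_proc() does above.
 */
static uintptr_t
ex_place(uintptr_t base, size_t slen, size_t len, size_t align, size_t off)
{
        uintptr_t addr = base + slen - (EX_PAGESIZE + len);
        uintptr_t as_addr = addr;

        addr = (addr & ~((uintptr_t)align - 1)) + off;
        if (addr > as_addr)             /* rounding overshot the redzone */
                addr -= align;
        return (addr);
}

int
main(void)
{
        /* a hypothetical 64M hole topping out at 0x7fff0000000 */
        uintptr_t a = ex_place(0x7ffec000000UL, 64UL << 20,
            1UL << 20 /* 1M mapping */, 4UL << 20 /* 4M alignment */, 0);

        printf("chosen addr = 0x%lx\n", (unsigned long)a);
        return (0);
}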
 791  790  
 792  791  int valid_va_range_aligned_wraparound;
 793  792  
 794  793  /*
 795  794   * Determine whether [*basep, *basep + *lenp) contains a mappable range of
 796  795   * addresses at least "minlen" long, where the base of the range is at "off"
 797  796   * phase from an "align" boundary and there is space for a "redzone"-sized
 798  797   * redzone on either side of the range.  On success, 1 is returned and *basep
 799  798   * and *lenp are adjusted to describe the acceptable range (including
 800  799   * the redzone).  On failure, 0 is returned.
 801  800   */
 802  801  /*ARGSUSED3*/
 803  802  int
 804  803  valid_va_range_aligned(caddr_t *basep, size_t *lenp, size_t minlen, int dir,
 805  804      size_t align, size_t redzone, size_t off)
 806  805  {
 807  806          uintptr_t hi, lo;
 808  807          size_t tot_len;
 809  808  
 810  809          ASSERT(align == 0 ? off == 0 : off < align);
 811  810          ASSERT(ISP2(align));
 812  811          ASSERT(align == 0 || align >= PAGESIZE);
 813  812  
 814  813          lo = (uintptr_t)*basep;
 815  814          hi = lo + *lenp;
 816  815          tot_len = minlen + 2 * redzone; /* need at least this much space */
 817  816  
 818  817          /*
 819  818           * If hi rolled over the top, try cutting back.
 820  819           */
 821  820          if (hi < lo) {
 822  821                  *lenp = 0UL - lo - 1UL;
 823  822                  /* See if this really happens. If so, then we figure out why */
 824  823                  valid_va_range_aligned_wraparound++;
 825  824                  hi = lo + *lenp;
 826  825          }
 827  826          if (*lenp < tot_len) {
 828  827                  return (0);
 829  828          }
 830  829  
 831  830  #if defined(__amd64)
 832  831          /*
 833  832           * Deal with a possible hole in the address range between
 834  833           * hole_start and hole_end that should never be mapped.
 835  834           */
 836  835          if (lo < hole_start) {
 837  836                  if (hi > hole_start) {
 838  837                          if (hi < hole_end) {
 839  838                                  hi = hole_start;
 840  839                          } else {
 841  840                                  /* lo < hole_start && hi >= hole_end */
 842  841                                  if (dir == AH_LO) {
 843  842                                          /*
 844  843                                           * prefer lowest range
 845  844                                           */
 846  845                                          if (hole_start - lo >= tot_len)
 847  846                                                  hi = hole_start;
 848  847                                          else if (hi - hole_end >= tot_len)
 849  848                                                  lo = hole_end;
 850  849                                          else
 851  850                                                  return (0);
 852  851                                  } else {
 853  852                                          /*
 854  853                                           * prefer highest range
 855  854                                           */
 856  855                                          if (hi - hole_end >= tot_len)
 857  856                                                  lo = hole_end;
 858  857                                          else if (hole_start - lo >= tot_len)
 859  858                                                  hi = hole_start;
 860  859                                          else
 861  860                                                  return (0);
 862  861                                  }
 863  862                          }
 864  863                  }
 865  864          } else {
 866  865                  /* lo >= hole_start */
 867  866                  if (hi < hole_end)
 868  867                          return (0);
 869  868                  if (lo < hole_end)
 870  869                          lo = hole_end;
 871  870          }
 872  871  #endif
 873  872  
 874  873          if (hi - lo < tot_len)
 875  874                  return (0);
 876  875  
 877  876          if (align > 1) {
 878  877                  uintptr_t tlo = lo + redzone;
 879  878                  uintptr_t thi = hi - redzone;
 880  879                  tlo = (uintptr_t)P2PHASEUP(tlo, align, off);
 881  880                  if (tlo < lo + redzone) {
 882  881                          return (0);
 883  882                  }
 884  883                  if (thi < tlo || thi - tlo < minlen) {
 885  884                          return (0);
 886  885                  }
 887  886          }
 888  887  
 889  888          *basep = (caddr_t)lo;
 890  889          *lenp = hi - lo;
 891  890          return (1);
 892  891  }
 893  892  
 894  893  /*
 895  894   * Determine whether [*basep, *basep + *lenp) contains a mappable range of
 896  895   * addresses at least "minlen" long.  On success, 1 is returned and *basep
 897  896   * and *lenp are adjusted to describe the acceptable range.  On failure, 0
 898  897   * is returned.
 899  898   */
 900  899  int
 901  900  valid_va_range(caddr_t *basep, size_t *lenp, size_t minlen, int dir)
 902  901  {
 903  902          return (valid_va_range_aligned(basep, lenp, minlen, dir, 0, 0, 0));
 904  903  }
 905  904  
 906  905  /*
 907  906   * Determine whether [addr, addr+len] are valid user addresses.
 908  907   */
 909  908  /*ARGSUSED*/
 910  909  int
 911  910  valid_usr_range(caddr_t addr, size_t len, uint_t prot, struct as *as,
 912  911      caddr_t userlimit)
 913  912  {
 914  913          caddr_t eaddr = addr + len;
 915  914  
 916  915          if (eaddr <= addr || addr >= userlimit || eaddr > userlimit)
 917  916                  return (RANGE_BADADDR);
 918  917  
 919  918  #if defined(__amd64)
 920  919          /*
 921  920           * Check for the VA hole
 922  921           */
 923  922          if (eaddr > (caddr_t)hole_start && addr < (caddr_t)hole_end)
 924  923                  return (RANGE_BADADDR);
 925  924  #endif
 926  925  
 927  926          return (RANGE_OKAY);
 928  927  }
 929  928  
 930  929  /*
 931  930   * Return 1 if the page frame is onboard memory, else 0.
 932  931   */
 933  932  int
 934  933  pf_is_memory(pfn_t pf)
 935  934  {
 936  935          if (pfn_is_foreign(pf))
 937  936                  return (0);
 938  937          return (address_in_memlist(phys_install, pfn_to_pa(pf), 1));
 939  938  }
 940  939  
 941  940  /*
 942  941   * return the memrange containing pfn
 943  942   */
 944  943  int
 945  944  memrange_num(pfn_t pfn)
 946  945  {
 947  946          int n;
 948  947  
 949  948          for (n = 0; n < nranges - 1; ++n) {
 950  949                  if (pfn >= memranges[n])
 951  950                          break;
 952  951          }
 953  952          return (n);
 954  953  }
 955  954  
 956  955  /*
 957  956   * return the mnoderange containing pfn
 958  957   */
 959  958  /*ARGSUSED*/
 960  959  int
 961  960  pfn_2_mtype(pfn_t pfn)
 962  961  {
 963  962  #if defined(__xpv)
 964  963          return (0);
 965  964  #else
 966  965          int     n;
 967  966  
 968  967          /* Always start from highest pfn and work our way down */
 969  968          for (n = mtypetop; n != -1; n = mnoderanges[n].mnr_next) {
 970  969                  if (pfn >= mnoderanges[n].mnr_pfnlo) {
 971  970                          break;
 972  971                  }
 973  972          }
 974  973          return (n);
 975  974  #endif
 976  975  }
 977  976  
 978  977  #if !defined(__xpv)
 979  978  /*
 980  979   * is_contigpage_free:
 981  980   *      returns a page list of contiguous pages. It minimally has to return
 982  981   *      minctg pages. Caller determines minctg based on the scatter-gather
 983  982   *      list length.
 984  983   *
 985  984   *      pfnp is set to the next page frame to search on return.
 986  985   */
 987  986  static page_t *
 988  987  is_contigpage_free(
 989  988          pfn_t *pfnp,
 990  989          pgcnt_t *pgcnt,
 991  990          pgcnt_t minctg,
 992  991          uint64_t pfnseg,
 993  992          int iolock)
 994  993  {
 995  994          int     i = 0;
 996  995          pfn_t   pfn = *pfnp;
 997  996          page_t  *pp;
 998  997          page_t  *plist = NULL;
 999  998  
1000  999          /*
1001 1000           * fail if pfn + minctg crosses a segment boundary.
1002 1001           * Adjust for next starting pfn to begin at segment boundary.
1003 1002           */
1004 1003  
1005 1004          if (((*pfnp + minctg - 1) & pfnseg) < (*pfnp & pfnseg)) {
1006 1005                  *pfnp = roundup(*pfnp, pfnseg + 1);
1007 1006                  return (NULL);
1008 1007          }
1009 1008  
1010 1009          do {
1011 1010  retry:
1012 1011                  pp = page_numtopp_nolock(pfn + i);
1013 1012                  if ((pp == NULL) || IS_DUMP_PAGE(pp) ||
1014 1013                      (page_trylock(pp, SE_EXCL) == 0)) {
1015 1014                          (*pfnp)++;
1016 1015                          break;
1017 1016                  }
1018 1017                  if (page_pptonum(pp) != pfn + i) {
1019 1018                          page_unlock(pp);
1020 1019                          goto retry;
1021 1020                  }
1022 1021  
1023 1022                  if (!(PP_ISFREE(pp))) {
1024 1023                          page_unlock(pp);
1025 1024                          (*pfnp)++;
1026 1025                          break;
1027 1026                  }
1028 1027  
1029 1028                  if (!PP_ISAGED(pp)) {
1030 1029                          page_list_sub(pp, PG_CACHE_LIST);
1031 1030                          page_hashout(pp, (kmutex_t *)NULL);
1032 1031                  } else {
1033 1032                          page_list_sub(pp, PG_FREE_LIST);
1034 1033                  }
1035 1034  
1036 1035                  if (iolock)
1037 1036                          page_io_lock(pp);
1038 1037                  page_list_concat(&plist, &pp);
1039 1038  
1040 1039                  /*
1041 1040                   * exit loop when pgcnt satisfied or segment boundary reached.
1042 1041                   */
1043 1042  
1044 1043          } while ((++i < *pgcnt) && ((pfn + i) & pfnseg));
1045 1044  
1046 1045          *pfnp += i;             /* set to next pfn to search */
1047 1046  
1048 1047          if (i >= minctg) {
1049 1048                  *pgcnt -= i;
1050 1049                  return (plist);
1051 1050          }
1052 1051  
1053 1052          /*
1054 1053           * failure: minctg not satisfied.
1055 1054           *
1056 1055           * if next request crosses segment boundary, set next pfn
1057 1056           * to search from the segment boundary.
1058 1057           */
1059 1058          if (((*pfnp + minctg - 1) & pfnseg) < (*pfnp & pfnseg))
1060 1059                  *pfnp = roundup(*pfnp, pfnseg + 1);
1061 1060  
1062 1061          /* clean up any pages already allocated */
1063 1062  
1064 1063          while (plist) {
1065 1064                  pp = plist;
1066 1065                  page_sub(&plist, pp);
1067 1066                  page_list_add(pp, PG_FREE_LIST | PG_LIST_TAIL);
1068 1067                  if (iolock)
1069 1068                          page_io_unlock(pp);
1070 1069                  page_unlock(pp);
1071 1070          }
1072 1071  
1073 1072          return (NULL);
1074 1073  }
1075 1074  #endif  /* !__xpv */
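
One detail in is_contigpage_free() worth spelling out: pfnseg (derived from dma_attr_seg) is a mask of the pfn bits that fall inside a single DMA segment, so a candidate run crosses a segment boundary exactly when the masked end pfn wraps below the masked start pfn. A user-space sketch of that test, with a hypothetical 4G segment mask:

#include <stdio.h>

/* Does [pfn, pfn + minctg) cross a (pfnseg + 1)-aligned boundary? */
static int
ex_crosses_seg(unsigned long pfn, unsigned long minctg, unsigned long pfnseg)
{
        return (((pfn + minctg - 1) & pfnseg) < (pfn & pfnseg));
}

int
main(void)
{
        /* a 4G dma_attr_seg of 0xffffffff gives a pfnseg of 0xfffff */
        unsigned long pfnseg = 0xfffff;

        printf("short run: %d\n", ex_crosses_seg(0xffff0, 0x08, pfnseg));
        printf("long run:  %d\n", ex_crosses_seg(0xffff0, 0x20, pfnseg));
        return (0);
}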
1076 1075  
1077 1076  /*
1078 1077   * verify that pages being returned from allocator have correct DMA attribute
1079 1078   */
1080 1079  #ifndef DEBUG
1081 1080  #define check_dma(a, b, c) (void)(0)
1082 1081  #else
1083 1082  static void
1084 1083  check_dma(ddi_dma_attr_t *dma_attr, page_t *pp, int cnt)
1085 1084  {
1086 1085          if (dma_attr == NULL)
1087 1086                  return;
1088 1087  
1089 1088          while (cnt-- > 0) {
1090 1089                  if (pa_to_ma(pfn_to_pa(pp->p_pagenum)) <
1091 1090                      dma_attr->dma_attr_addr_lo)
1092 1091                          panic("PFN (pp=%p) below dma_attr_addr_lo", (void *)pp);
1093 1092                  if (pa_to_ma(pfn_to_pa(pp->p_pagenum)) >=
1094 1093                      dma_attr->dma_attr_addr_hi)
1095 1094                          panic("PFN (pp=%p) above dma_attr_addr_hi", (void *)pp);
1096 1095                  pp = pp->p_next;
1097 1096          }
1098 1097  }
1099 1098  #endif
1100 1099  
1101 1100  #if !defined(__xpv)
1102 1101  static page_t *
1103 1102  page_get_contigpage(pgcnt_t *pgcnt, ddi_dma_attr_t *mattr, int iolock)
1104 1103  {
1105 1104          pfn_t           pfn;
1106 1105          int             sgllen;
1107 1106          uint64_t        pfnseg;
1108 1107          pgcnt_t         minctg;
1109 1108          page_t          *pplist = NULL, *plist;
1110 1109          uint64_t        lo, hi;
1111 1110          pgcnt_t         pfnalign = 0;
1112 1111          static pfn_t    startpfn;
1113 1112          static pgcnt_t  lastctgcnt;
1114 1113          uintptr_t       align;
1115 1114  
1116 1115          CONTIG_LOCK();
1117 1116  
1118 1117          if (mattr) {
1119 1118                  lo = mmu_btop((mattr->dma_attr_addr_lo + MMU_PAGEOFFSET));
1120 1119                  hi = mmu_btop(mattr->dma_attr_addr_hi);
1121 1120                  if (hi >= physmax)
1122 1121                          hi = physmax - 1;
1123 1122                  sgllen = mattr->dma_attr_sgllen;
1124 1123                  pfnseg = mmu_btop(mattr->dma_attr_seg);
1125 1124  
1126 1125                  align = maxbit(mattr->dma_attr_align, mattr->dma_attr_minxfer);
1127 1126                  if (align > MMU_PAGESIZE)
1128 1127                          pfnalign = mmu_btop(align);
1129 1128  
1130 1129                  /*
1131 1130                   * in order to satisfy the request, must minimally
1132 1131                   * acquire minctg contiguous pages
1133 1132                   */
1134 1133                  minctg = howmany(*pgcnt, sgllen);
1135 1134  
1136 1135                  ASSERT(hi >= lo);
1137 1136  
1138 1137                  /*
1139 1138                   * start from where last searched if the minctg >= lastctgcnt
1140 1139                   */
1141 1140                  if (minctg < lastctgcnt || startpfn < lo || startpfn > hi)
1142 1141                          startpfn = lo;
1143 1142          } else {
1144 1143                  hi = physmax - 1;
1145 1144                  lo = 0;
1146 1145                  sgllen = 1;
1147 1146                  pfnseg = mmu.highest_pfn;
1148 1147                  minctg = *pgcnt;
1149 1148  
1150 1149                  if (minctg < lastctgcnt)
1151 1150                          startpfn = lo;
1152 1151          }
1153 1152          lastctgcnt = minctg;
1154 1153  
1155 1154          ASSERT(pfnseg + 1 >= (uint64_t)minctg);
1156 1155  
1157 1156          /* conserve 16m memory - start search above 16m when possible */
1158 1157          if (hi > PFN_16M && startpfn < PFN_16M)
1159 1158                  startpfn = PFN_16M;
1160 1159  
1161 1160          pfn = startpfn;
1162 1161          if (pfnalign)
1163 1162                  pfn = P2ROUNDUP(pfn, pfnalign);
1164 1163  
1165 1164          while (pfn + minctg - 1 <= hi) {
1166 1165  
1167 1166                  plist = is_contigpage_free(&pfn, pgcnt, minctg, pfnseg, iolock);
1168 1167                  if (plist) {
1169 1168                          page_list_concat(&pplist, &plist);
1170 1169                          sgllen--;
1171 1170                          /*
1172 1171                           * return when contig pages no longer needed
1173 1172                           */
1174 1173                          if (!*pgcnt || ((*pgcnt <= sgllen) && !pfnalign)) {
1175 1174                                  startpfn = pfn;
1176 1175                                  CONTIG_UNLOCK();
1177 1176                                  check_dma(mattr, pplist, *pgcnt);
1178 1177                                  return (pplist);
1179 1178                          }
1180 1179                          minctg = howmany(*pgcnt, sgllen);
1181 1180                  }
1182 1181                  if (pfnalign)
1183 1182                          pfn = P2ROUNDUP(pfn, pfnalign);
1184 1183          }
1185 1184  
1186 1185          /* cannot find contig pages in specified range */
1187 1186          if (startpfn == lo) {
1188 1187                  CONTIG_UNLOCK();
1189 1188                  return (NULL);
1190 1189          }
1191 1190  
1192 1191          /* did not start with lo previously */
1193 1192          pfn = lo;
1194 1193          if (pfnalign)
1195 1194                  pfn = P2ROUNDUP(pfn, pfnalign);
1196 1195  
1197 1196          /* allow search to go above startpfn */
1198 1197          while (pfn < startpfn) {
1199 1198  
1200 1199                  plist = is_contigpage_free(&pfn, pgcnt, minctg, pfnseg, iolock);
1201 1200                  if (plist != NULL) {
1202 1201  
1203 1202                          page_list_concat(&pplist, &plist);
1204 1203                          sgllen--;
1205 1204  
1206 1205                          /*
1207 1206                           * return when contig pages no longer needed
1208 1207                           */
1209 1208                          if (!*pgcnt || ((*pgcnt <= sgllen) && !pfnalign)) {
1210 1209                                  startpfn = pfn;
1211 1210                                  CONTIG_UNLOCK();
1212 1211                                  check_dma(mattr, pplist, *pgcnt);
1213 1212                                  return (pplist);
1214 1213                          }
1215 1214                          minctg = howmany(*pgcnt, sgllen);
1216 1215                  }
1217 1216                  if (pfnalign)
1218 1217                          pfn = P2ROUNDUP(pfn, pfnalign);
1219 1218          }
1220 1219          CONTIG_UNLOCK();
1221 1220          return (NULL);
1222 1221  }
1223 1222  #endif  /* !__xpv */
1224 1223  
1225 1224  /*
1226 1225   * mnode_range_cnt() calculates the number of memory ranges for mnode and
1227 1226   * memranges[]. Used to determine the size of page lists and mnoderanges.
1228 1227   */
1229 1228  int
1230 1229  mnode_range_cnt(int mnode)
1231 1230  {
1232 1231  #if defined(__xpv)
1233 1232          ASSERT(mnode == 0);
1234 1233          return (1);
1235 1234  #else   /* __xpv */
1236 1235          int     mri;
1237 1236          int     mnrcnt = 0;
1238 1237  
1239 1238          if (mem_node_config[mnode].exists != 0) {
1240 1239                  mri = nranges - 1;
1241 1240  
1242 1241                  /* find the memranges index below contained in mnode range */
1243 1242  
1244 1243                  while (MEMRANGEHI(mri) < mem_node_config[mnode].physbase)
1245 1244                          mri--;
1246 1245  
1247 1246                  /*
1248 1247                   * increment mnode range counter when memranges or mnode
1249 1248                   * boundary is reached.
1250 1249                   */
1251 1250                  while (mri >= 0 &&
1252 1251                      mem_node_config[mnode].physmax >= MEMRANGELO(mri)) {
1253 1252                          mnrcnt++;
1254 1253                          if (mem_node_config[mnode].physmax > MEMRANGEHI(mri))
1255 1254                                  mri--;
1256 1255                          else
1257 1256                                  break;
1258 1257                  }
1259 1258          }
1260 1259          ASSERT(mnrcnt <= MAX_MNODE_MRANGES);
1261 1260          return (mnrcnt);
1262 1261  #endif  /* __xpv */
1263 1262  }
1264 1263  
1265 1264  /*
1266 1265   * mnode_range_setup() initializes mnoderanges.
1267 1266   */
1268 1267  void
1269 1268  mnode_range_setup(mnoderange_t *mnoderanges)
1270 1269  {
1271 1270          mnoderange_t *mp = mnoderanges;
1272 1271          int     mnode, mri;
1273 1272          int     mindex = 0;     /* current index into mnoderanges array */
1274 1273          int     i, j;
1275 1274          pfn_t   hipfn;
1276 1275          int     last, hi;
1277 1276  
1278 1277          for (mnode = 0; mnode < max_mem_nodes; mnode++) {
1279 1278                  if (mem_node_config[mnode].exists == 0)
1280 1279                          continue;
1281 1280  
1282 1281                  mri = nranges - 1;
1283 1282  
1284 1283                  while (MEMRANGEHI(mri) < mem_node_config[mnode].physbase)
1285 1284                          mri--;
1286 1285  
1287 1286                  while (mri >= 0 && mem_node_config[mnode].physmax >=
1288 1287                      MEMRANGELO(mri)) {
1289 1288                          mnoderanges->mnr_pfnlo = MAX(MEMRANGELO(mri),
1290 1289                              mem_node_config[mnode].physbase);
1291 1290                          mnoderanges->mnr_pfnhi = MIN(MEMRANGEHI(mri),
1292 1291                              mem_node_config[mnode].physmax);
1293 1292                          mnoderanges->mnr_mnode = mnode;
1294 1293                          mnoderanges->mnr_memrange = mri;
1295 1294                          mnoderanges->mnr_exists = 1;
1296 1295                          mnoderanges++;
1297 1296                          mindex++;
1298 1297                          if (mem_node_config[mnode].physmax > MEMRANGEHI(mri))
1299 1298                                  mri--;
1300 1299                          else
1301 1300                                  break;
1302 1301                  }
1303 1302          }
1304 1303  
1305 1304          /*
1306 1305           * For now, do a simple sort of the mnoderanges array to fill in
1307 1306           * the mnr_next fields.  Since mindex is expected to be relatively
1308 1307           * small, we use a simple O(N^2) algorithm.
1309 1308           */
1310 1309          for (i = 0; i < mindex; i++) {
1311 1310                  if (mp[i].mnr_pfnlo == 0)       /* find lowest */
1312 1311                          break;
1313 1312          }
1314 1313          ASSERT(i < mindex);
1315 1314          last = i;
1316 1315          mtype16m = last;
1317 1316          mp[last].mnr_next = -1;
1318 1317          for (i = 0; i < mindex - 1; i++) {
1319 1318                  hipfn = (pfn_t)(-1);
1320 1319                  hi = -1;
1321 1320                  /* find next highest mnode range */
1322 1321                  for (j = 0; j < mindex; j++) {
1323 1322                          if (mp[j].mnr_pfnlo > mp[last].mnr_pfnlo &&
1324 1323                              mp[j].mnr_pfnlo < hipfn) {
1325 1324                                  hipfn = mp[j].mnr_pfnlo;
1326 1325                                  hi = j;
1327 1326                          }
1328 1327                  }
1329 1328                  mp[hi].mnr_next = last;
1330 1329                  last = hi;
1331 1330          }
1332 1331          mtypetop = last;
1333 1332  }
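
The second half of mnode_range_setup() threads the array into a descending
list through mnr_next with a selection-style O(N^2) pass: it starts from the
entry whose low pfn is 0 and repeatedly links in the entry with the
next-higher low pfn. Here is a compact sketch of just that linking step on a
simplified struct (the type, field names, and sample values are assumptions
for illustration):

#include <stdio.h>

typedef struct {
        unsigned long lo;       /* lowest pfn of the range */
        int next;               /* index of the next-lower range, -1 at end */
} mrange_t;

/*
 * Link n ranges in descending order of lo via the next field and
 * return the index of the topmost range (the kernel's mtypetop).
 */
static int
link_descending(mrange_t *mp, int n)
{
        int i, j, last, hi;
        unsigned long hipfn;

        for (i = 0; i < n; i++)                 /* range starting at pfn 0 */
                if (mp[i].lo == 0)
                        break;
        last = i;
        mp[last].next = -1;
        for (i = 0; i < n - 1; i++) {
                hipfn = (unsigned long)-1;
                hi = -1;
                for (j = 0; j < n; j++) {       /* find next-higher lo */
                        if (mp[j].lo > mp[last].lo && mp[j].lo < hipfn) {
                                hipfn = mp[j].lo;
                                hi = j;
                        }
                }
                mp[hi].next = last;
                last = hi;
        }
        return (last);
}

int
main(void)
{
        mrange_t mp[] = { { 0x1000, 0 }, { 0, 0 }, { 0x100000, 0 } };
        int top = link_descending(mp, 3);

        for (int i = top; i != -1; i = mp[i].next)
                printf("lo=%#lx\n", mp[i].lo);  /* 0x100000, 0x1000, 0 */
        return (0);
}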
1334 1333  
1335 1334  #ifndef __xpv
1336 1335  /*
1337 1336   * Update mnoderanges for memory hot-add DR operations.
1338 1337   */
1339 1338  static void
1340 1339  mnode_range_add(int mnode)
1341 1340  {
1342 1341          int     *prev;
1343 1342          int     n, mri;
1344 1343          pfn_t   start, end;
1345 1344          extern  void membar_sync(void);
1346 1345  
1347 1346          ASSERT(0 <= mnode && mnode < max_mem_nodes);
1348 1347          ASSERT(mem_node_config[mnode].exists);
1349 1348          start = mem_node_config[mnode].physbase;
1350 1349          end = mem_node_config[mnode].physmax;
1351 1350          ASSERT(start <= end);
1352 1351          mutex_enter(&mnoderange_lock);
1353 1352  
1354 1353  #ifdef  DEBUG
1355 1354          /* Check whether it interleaves with other memory nodes. */
1356 1355          for (n = mtypetop; n != -1; n = mnoderanges[n].mnr_next) {
1357 1356                  ASSERT(mnoderanges[n].mnr_exists);
1358 1357                  if (mnoderanges[n].mnr_mnode == mnode)
1359 1358                          continue;
1360 1359                  ASSERT(start > mnoderanges[n].mnr_pfnhi ||
1361 1360                      end < mnoderanges[n].mnr_pfnlo);
1362 1361          }
1363 1362  #endif  /* DEBUG */
1364 1363  
1365 1364          mri = nranges - 1;
1366 1365          while (MEMRANGEHI(mri) < mem_node_config[mnode].physbase)
1367 1366                  mri--;
1368 1367          while (mri >= 0 && mem_node_config[mnode].physmax >= MEMRANGELO(mri)) {
1369 1368                  /* Check whether mtype already exists. */
1370 1369                  for (n = mtypetop; n != -1; n = mnoderanges[n].mnr_next) {
1371 1370                          if (mnoderanges[n].mnr_mnode == mnode &&
1372 1371                              mnoderanges[n].mnr_memrange == mri) {
1373 1372                                  mnoderanges[n].mnr_pfnlo = MAX(MEMRANGELO(mri),
1374 1373                                      start);
1375 1374                                  mnoderanges[n].mnr_pfnhi = MIN(MEMRANGEHI(mri),
1376 1375                                      end);
1377 1376                                  break;
1378 1377                          }
1379 1378                  }
1380 1379  
1381 1380                  /* Add a new entry if it doesn't exist yet. */
1382 1381                  if (n == -1) {
1383 1382                          /* Try to find an unused entry in mnoderanges array. */
1384 1383                          for (n = 0; n < mnoderangecnt; n++) {
1385 1384                                  if (mnoderanges[n].mnr_exists == 0)
1386 1385                                          break;
1387 1386                          }
1388 1387                          ASSERT(n < mnoderangecnt);
1389 1388                          mnoderanges[n].mnr_pfnlo = MAX(MEMRANGELO(mri), start);
1390 1389                          mnoderanges[n].mnr_pfnhi = MIN(MEMRANGEHI(mri), end);
1391 1390                          mnoderanges[n].mnr_mnode = mnode;
1392 1391                          mnoderanges[n].mnr_memrange = mri;
1393 1392                          mnoderanges[n].mnr_exists = 1;
1394 1393                          /* Page 0 should always be present. */
1395 1394                          for (prev = &mtypetop;
1396 1395                              mnoderanges[*prev].mnr_pfnlo > start;
1397 1396                              prev = &mnoderanges[*prev].mnr_next) {
1398 1397                                  ASSERT(mnoderanges[*prev].mnr_next >= 0);
1399 1398                                  ASSERT(mnoderanges[*prev].mnr_pfnlo > end);
1400 1399                          }
1401 1400                          mnoderanges[n].mnr_next = *prev;
1402 1401                          membar_sync();
1403 1402                          *prev = n;
1404 1403                  }
1405 1404  
1406 1405                  if (mem_node_config[mnode].physmax > MEMRANGEHI(mri))
1407 1406                          mri--;
1408 1407                  else
1409 1408                          break;
1410 1409          }
1411 1410  
1412 1411          mutex_exit(&mnoderange_lock);
1413 1412  }
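
mnode_range_add() initializes the new entry completely, locates its
predecessor in the mtypetop list, calls membar_sync(), and only then stores
the new index into the predecessor's next pointer, so lock-free list walkers
never see a half-built entry. Below is a rough userland analogue of that
publish ordering; a C11 release fence stands in for the stronger
membar_sync(), and the entry type, table, and names are simplified
assumptions:

#include <stdatomic.h>
#include <stdio.h>

typedef struct {
        unsigned long lo, hi;
        int next;               /* index of next (lower) entry, -1 at end */
        int exists;
} entry_t;

static entry_t tbl[8];
static int top = -1;            /* analogue of mtypetop */

/*
 * Insert entry n into the descending-by-lo list.  All fields of tbl[n]
 * are filled in first; the release fence orders those stores before the
 * store that makes the entry reachable.  A fully concurrent version
 * would also need atomic loads/stores on next and top.
 */
static void
publish_entry(int n, unsigned long lo, unsigned long hi)
{
        int *prev = &top;

        tbl[n].lo = lo;
        tbl[n].hi = hi;
        tbl[n].exists = 1;

        while (*prev != -1 && tbl[*prev].lo > lo)
                prev = &tbl[*prev].next;
        tbl[n].next = *prev;
        atomic_thread_fence(memory_order_release);
        *prev = n;              /* entry becomes visible to list walkers */
}

int
main(void)
{
        publish_entry(0, 0, 0xfff);
        publish_entry(1, 0x100000, 0xffffffff);
        publish_entry(2, 0x1000, 0xfffff);
        for (int i = top; i != -1; i = tbl[i].next)
                printf("[%#lx, %#lx]\n", tbl[i].lo, tbl[i].hi);
        return (0);
}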
1414 1413  
1415 1414  /*
1416 1415   * Update mnoderanges for memory hot-removal DR operations.
1417 1416   */
1418 1417  static void
1419 1418  mnode_range_del(int mnode)
1420 1419  {
1421 1420          _NOTE(ARGUNUSED(mnode));
1422 1421          ASSERT(0 <= mnode && mnode < max_mem_nodes);
1423 1422          /* TODO: support deletion operation. */
1424 1423          ASSERT(0);
1425 1424  }
1426 1425  
1427 1426  void
1428 1427  plat_slice_add(pfn_t start, pfn_t end)
1429 1428  {
1430 1429          mem_node_add_slice(start, end);
1431 1430          if (plat_dr_enabled()) {
1432 1431                  mnode_range_add(PFN_2_MEM_NODE(start));
1433 1432          }
1434 1433  }
1435 1434  
1436 1435  void
1437 1436  plat_slice_del(pfn_t start, pfn_t end)
1438 1437  {
1439 1438          ASSERT(PFN_2_MEM_NODE(start) == PFN_2_MEM_NODE(end));
1440 1439          ASSERT(plat_dr_enabled());
1441 1440          mnode_range_del(PFN_2_MEM_NODE(start));
1442 1441          mem_node_del_slice(start, end);
1443 1442  }
1444 1443  #endif  /* __xpv */
1445 1444  
1446 1445  /*ARGSUSED*/
1447 1446  int
1448 1447  mtype_init(vnode_t *vp, caddr_t vaddr, uint_t *flags, size_t pgsz)
1449 1448  {
1450 1449          int mtype = mtypetop;
1451 1450  
1452 1451  #if !defined(__xpv)
1453 1452  #if defined(__i386)
1454 1453          /*
1455 1454           * set the mtype range
1456 1455           * - kmem requests need to be below 4g if restricted_kmemalloc is set.
1457 1456           * - for non-kmem requests, set range to above 4g if memory below 4g
1458 1457           * runs low.
1459 1458           */
1460 1459          if (restricted_kmemalloc && VN_ISKAS(vp) &&
1461 1460              (caddr_t)(vaddr) >= kernelheap &&
1462 1461              (caddr_t)(vaddr) < ekernelheap) {
1463 1462                  ASSERT(physmax4g);
1464 1463                  mtype = mtype4g;
1465 1464                  if (RESTRICT16M_ALLOC(freemem4g - btop(pgsz),
1466 1465                      btop(pgsz), *flags)) {
1467 1466                          *flags |= PGI_MT_RANGE16M;
1468 1467                  } else {
1469 1468                          VM_STAT_ADD(vmm_vmstats.unrestrict16mcnt);
1470 1469                          VM_STAT_COND_ADD((*flags & PG_PANIC),
1471 1470                              vmm_vmstats.pgpanicalloc);
1472 1471                          *flags |= PGI_MT_RANGE0;
1473 1472                  }
1474 1473                  return (mtype);
1475 1474          }
1476 1475  #endif  /* __i386 */
1477 1476  
1478 1477          if (RESTRICT4G_ALLOC) {
1479 1478                  VM_STAT_ADD(vmm_vmstats.restrict4gcnt);
1480 1479                  /* here only for > 4g systems */
1481 1480                  *flags |= PGI_MT_RANGE4G;
1482 1481          } else if (RESTRICT16M_ALLOC(freemem, btop(pgsz), *flags)) {
1483 1482                  *flags |= PGI_MT_RANGE16M;
1484 1483          } else {
1485 1484                  VM_STAT_ADD(vmm_vmstats.unrestrict16mcnt);
1486 1485                  VM_STAT_COND_ADD((*flags & PG_PANIC), vmm_vmstats.pgpanicalloc);
1487 1486                  *flags |= PGI_MT_RANGE0;
1488 1487          }
1489 1488  #endif /* !__xpv */
1490 1489          return (mtype);
1491 1490  }
1492 1491  
1493 1492  
1494 1493  /* mtype init for page_get_replacement_page */
1495 1494  /*ARGSUSED*/
1496 1495  int
1497 1496  mtype_pgr_init(int *flags, page_t *pp, int mnode, pgcnt_t pgcnt)
1498 1497  {
1499 1498          int mtype = mtypetop;
1500 1499  #if !defined(__xpv)
1501 1500          if (RESTRICT16M_ALLOC(freemem, pgcnt, *flags)) {
1502 1501                  *flags |= PGI_MT_RANGE16M;
1503 1502          } else {
1504 1503                  VM_STAT_ADD(vmm_vmstats.unrestrict16mcnt);
1505 1504                  *flags |= PGI_MT_RANGE0;
1506 1505          }
1507 1506  #endif
1508 1507          return (mtype);
1509 1508  }
1510 1509  
1511 1510  /*
1512 1511   * Determine if the mnode range specified in mtype contains memory belonging
1513 1512   * to memory node mnode.  If flags & PGI_MT_RANGE is set then mtype contains
1514 1513   * the range from high pfn to 0, 16m or 4g.
1515 1514   *
1516 1515   * Return the first mnode range type index found, or -1 if none is found.
1517 1516   */
1518 1517  int
1519 1518  mtype_func(int mnode, int mtype, uint_t flags)
1520 1519  {
1521 1520          if (flags & PGI_MT_RANGE) {
1522 1521                  int     mnr_lim = MRI_0;
1523 1522  
1524 1523                  if (flags & PGI_MT_NEXT) {
1525 1524                          mtype = mnoderanges[mtype].mnr_next;
1526 1525                  }
1527 1526                  if (flags & PGI_MT_RANGE4G)
1528 1527                          mnr_lim = MRI_4G;       /* exclude 0-4g range */
1529 1528                  else if (flags & PGI_MT_RANGE16M)
1530 1529                          mnr_lim = MRI_16M;      /* exclude 0-16m range */
1531 1530                  while (mtype != -1 &&
1532 1531                      mnoderanges[mtype].mnr_memrange <= mnr_lim) {
1533 1532                          if (mnoderanges[mtype].mnr_mnode == mnode)
1534 1533                                  return (mtype);
1535 1534                          mtype = mnoderanges[mtype].mnr_next;
1536 1535                  }
1537 1536          } else if (mnoderanges[mtype].mnr_mnode == mnode) {
1538 1537                  return (mtype);
1539 1538          }
1540 1539          return (-1);
1541 1540  }
1542 1541  
1543 1542  /*
1544 1543   * Update the page list max counts with the pfn range specified by the
1545 1544   * input parameters.
1546 1545   */
1547 1546  void
1548 1547  mtype_modify_max(pfn_t startpfn, long cnt)
1549 1548  {
1550 1549          int             mtype;
1551 1550          pgcnt_t         inc;
1552 1551          spgcnt_t        scnt = (spgcnt_t)(cnt);
1553 1552          pgcnt_t         acnt = ABS(scnt);
1554 1553          pfn_t           endpfn = startpfn + acnt;
1555 1554          pfn_t           pfn, lo;
1556 1555  
1557 1556          if (!physmax4g)
1558 1557                  return;
1559 1558  
1560 1559          mtype = mtypetop;
1561 1560          for (pfn = endpfn; pfn > startpfn; ) {
1562 1561                  ASSERT(mtype != -1);
1563 1562                  lo = mnoderanges[mtype].mnr_pfnlo;
1564 1563                  if (pfn > lo) {
1565 1564                          if (startpfn >= lo) {
1566 1565                                  inc = pfn - startpfn;
1567 1566                          } else {
1568 1567                                  inc = pfn - lo;
1569 1568                          }
1570 1569                          if (mnoderanges[mtype].mnr_memrange != MRI_4G) {
1571 1570                                  if (scnt > 0)
1572 1571                                          maxmem4g += inc;
1573 1572                                  else
1574 1573                                          maxmem4g -= inc;
1575 1574                          }
1576 1575                          pfn -= inc;
1577 1576                  }
1578 1577                  mtype = mnoderanges[mtype].mnr_next;
1579 1578          }
1580 1579  }
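
mtype_modify_max() walks the descending range list from mtypetop, clips
[startpfn, endpfn) against each range, and adjusts maxmem4g by the clipped
page count for the ranges it tracks. The sketch below isolates just the
clipping walk over a simplified range list (the struct, names, and sample
interval are hypothetical):

#include <stdio.h>

typedef struct { unsigned long lo; int next; } mr_t;

/*
 * Walk a descending range list from 'top' and report how many pages of
 * [start, end) land in each range, the same clipping loop that
 * mtype_modify_max() uses to keep maxmem4g in sync.
 */
static void
apportion(const mr_t *mr, int top, unsigned long start, unsigned long end)
{
        unsigned long pfn = end, lo, inc;
        int mt = top;

        while (pfn > start && mt != -1) {
                lo = mr[mt].lo;
                if (pfn > lo) {
                        inc = (start >= lo) ? pfn - start : pfn - lo;
                        printf("range %d gets %lu pages\n", mt, inc);
                        pfn -= inc;
                }
                mt = mr[mt].next;
        }
}

int
main(void)
{
        /* Three ranges: [0x100000,...), [0x1000, 0x100000), [0, 0x1000). */
        mr_t mr[] = { { 0, -1 }, { 0x1000, 0 }, { 0x100000, 1 } };

        apportion(mr, 2, 0x800, 0x2000);        /* straddles the 0x1000 cut */
        return (0);
}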
1581 1580  
1582 1581  int
1583 1582  mtype_2_mrange(int mtype)
1584 1583  {
1585 1584          return (mnoderanges[mtype].mnr_memrange);
1586 1585  }
1587 1586  
1588 1587  void
1589 1588  mnodetype_2_pfn(int mnode, int mtype, pfn_t *pfnlo, pfn_t *pfnhi)
1590 1589  {
1591 1590          _NOTE(ARGUNUSED(mnode));
1592 1591          ASSERT(mnoderanges[mtype].mnr_mnode == mnode);
1593 1592          *pfnlo = mnoderanges[mtype].mnr_pfnlo;
1594 1593          *pfnhi = mnoderanges[mtype].mnr_pfnhi;
1595 1594  }
1596 1595  
1597 1596  size_t
1598 1597  plcnt_sz(size_t ctrs_sz)
1599 1598  {
1600 1599  #ifdef DEBUG
1601 1600          int     szc, colors;
1602 1601  
1603 1602          ctrs_sz += mnoderangecnt * sizeof (struct mnr_mts) * mmu_page_sizes;
1604 1603          for (szc = 0; szc < mmu_page_sizes; szc++) {
1605 1604                  colors = page_get_pagecolors(szc);
1606 1605                  ctrs_sz += mnoderangecnt * sizeof (pgcnt_t) * colors;
1607 1606          }
1608 1607  #endif
1609 1608          return (ctrs_sz);
1610 1609  }
1611 1610  
1612 1611  caddr_t
1613 1612  plcnt_init(caddr_t addr)
1614 1613  {
1615 1614  #ifdef DEBUG
1616 1615          int     mt, szc, colors;
1617 1616  
1618 1617          for (mt = 0; mt < mnoderangecnt; mt++) {
1619 1618                  mnoderanges[mt].mnr_mts = (struct mnr_mts *)addr;
1620 1619                  addr += (sizeof (struct mnr_mts) * mmu_page_sizes);
1621 1620                  for (szc = 0; szc < mmu_page_sizes; szc++) {
1622 1621                          colors = page_get_pagecolors(szc);
1623 1622                          mnoderanges[mt].mnr_mts[szc].mnr_mts_colors = colors;
1624 1623                          mnoderanges[mt].mnr_mts[szc].mnr_mtsc_pgcnt =
1625 1624                              (pgcnt_t *)addr;
1626 1625                          addr += (sizeof (pgcnt_t) * colors);
1627 1626                  }
1628 1627          }
1629 1628  #endif
1630 1629          return (addr);
1631 1630  }
1632 1631  
1633 1632  void
1634 1633  plcnt_inc_dec(page_t *pp, int mtype, int szc, long cnt, int flags)
1635 1634  {
1636 1635          _NOTE(ARGUNUSED(pp));
1637 1636  #ifdef DEBUG
1638 1637          int     bin = PP_2_BIN(pp);
1639 1638  
1640 1639          atomic_add_long(&mnoderanges[mtype].mnr_mts[szc].mnr_mts_pgcnt, cnt);
1641 1640          atomic_add_long(&mnoderanges[mtype].mnr_mts[szc].mnr_mtsc_pgcnt[bin],
1642 1641              cnt);
1643 1642  #endif
1644 1643          ASSERT(mtype == PP_2_MTYPE(pp));
1645 1644          if (physmax4g && mnoderanges[mtype].mnr_memrange != MRI_4G)
1646 1645                  atomic_add_long(&freemem4g, cnt);
1647 1646          if (flags & PG_CACHE_LIST)
1648 1647                  atomic_add_long(&mnoderanges[mtype].mnr_mt_clpgcnt, cnt);
1649 1648          else
1650 1649                  atomic_add_long(&mnoderanges[mtype].mnr_mt_flpgcnt[szc], cnt);
1651 1650          atomic_add_long(&mnoderanges[mtype].mnr_mt_totcnt, cnt);
1652 1651  }
1653 1652  
1654 1653  /*
1655 1654   * Returns the free page count for mnode
1656 1655   */
1657 1656  int
1658 1657  mnode_pgcnt(int mnode)
1659 1658  {
1660 1659          int     mtype = mtypetop;
1661 1660          int     flags = PGI_MT_RANGE0;
1662 1661          pgcnt_t pgcnt = 0;
1663 1662  
1664 1663          mtype = mtype_func(mnode, mtype, flags);
1665 1664  
1666 1665          while (mtype != -1) {
1667 1666                  pgcnt += MTYPE_FREEMEM(mtype);
1668 1667                  mtype = mtype_func(mnode, mtype, flags | PGI_MT_NEXT);
1669 1668          }
1670 1669          return (pgcnt);
1671 1670  }
1672 1671  
1673 1672  /*
1674 1673   * Initialize page coloring variables based on the l2 cache parameters.
1675 1674   * Calculate and return memory needed for page coloring data structures.
1676 1675   */
1677 1676  size_t
1678 1677  page_coloring_init(uint_t l2_sz, int l2_linesz, int l2_assoc)
1679 1678  {
1680 1679          _NOTE(ARGUNUSED(l2_linesz));
1681 1680          size_t  colorsz = 0;
1682 1681          int     i;
1683 1682          int     colors;
1684 1683  
1685 1684  #if defined(__xpv)
1686 1685          /*
1687 1686           * Hypervisor domains currently don't have any concept of NUMA.
1688 1687           * Hence we'll act like there is only 1 memrange.
1689 1688           */
1690 1689          i = memrange_num(1);
1691 1690  #else /* !__xpv */
1692 1691          /*
1693 1692           * Reduce the memory ranges lists if we don't have large amounts
1694 1693           * of memory. This avoids searching known empty free lists.
1695 1694           * To support memory DR operations, we need to keep memory ranges
1696 1695           * for possible memory hot-add operations.
1697 1696           */
1698 1697          if (plat_dr_physmax > physmax)
1699 1698                  i = memrange_num(plat_dr_physmax);
1700 1699          else
1701 1700                  i = memrange_num(physmax);
1702 1701  #if defined(__i386)
1703 1702          if (i > MRI_4G)
1704 1703                  restricted_kmemalloc = 0;
1705 1704  #endif
1706 1705          /* physmax greater than 4g */
1707 1706          if (i == MRI_4G)
1708 1707                  physmax4g = 1;
1709 1708  #endif /* !__xpv */
1710 1709          memranges += i;
1711 1710          nranges -= i;
1712 1711  
1713 1712          ASSERT(mmu_page_sizes <= MMU_PAGE_SIZES);
1714 1713  
1715 1714          ASSERT(ISP2(l2_linesz));
1716 1715          ASSERT(l2_sz > MMU_PAGESIZE);
1717 1716  
1718 1717          /* l2_assoc is 0 for fully associative l2 cache */
1719 1718          if (l2_assoc)
1720 1719                  l2_colors = MAX(1, l2_sz / (l2_assoc * MMU_PAGESIZE));
1721 1720          else
1722 1721                  l2_colors = 1;
1723 1722  
1724 1723          ASSERT(ISP2(l2_colors));
1725 1724  
1726 1725          /* for scalability, configure at least PAGE_COLORS_MIN color bins */
1727 1726          page_colors = MAX(l2_colors, PAGE_COLORS_MIN);
1728 1727  
1729 1728          /*
1730 1729           * cpu_page_colors is non-zero when a page color may be spread across
1731 1730           * multiple bins.
1732 1731           */
1733 1732          if (l2_colors < page_colors)
1734 1733                  cpu_page_colors = l2_colors;
1735 1734  
1736 1735          ASSERT(ISP2(page_colors));
1737 1736  
1738 1737          page_colors_mask = page_colors - 1;
1739 1738  
1740 1739          ASSERT(ISP2(CPUSETSIZE()));
1741 1740          page_coloring_shift = lowbit(CPUSETSIZE());
1742 1741  
1743 1742          /* initialize number of colors per page size */
1744 1743          for (i = 0; i <= mmu.max_page_level; i++) {
1745 1744                  hw_page_array[i].hp_size = LEVEL_SIZE(i);
1746 1745                  hw_page_array[i].hp_shift = LEVEL_SHIFT(i);
1747 1746                  hw_page_array[i].hp_pgcnt = LEVEL_SIZE(i) >> LEVEL_SHIFT(0);
1748 1747                  hw_page_array[i].hp_colors = (page_colors_mask >>
1749 1748                      (hw_page_array[i].hp_shift - hw_page_array[0].hp_shift))
1750 1749                      + 1;
1751 1750                  colorequivszc[i] = 0;
1752 1751          }
1753 1752  
1754 1753          /*
1755 1754           * The value of cpu_page_colors determines if additional color bins
1756 1755           * need to be checked for a particular color in the page_get routines.
1757 1756           */
1758 1757          if (cpu_page_colors != 0) {
1759 1758  
1760 1759                  int a = lowbit(page_colors) - lowbit(cpu_page_colors);
1761 1760                  ASSERT(a > 0);
1762 1761                  ASSERT(a < 16);
1763 1762  
1764 1763                  for (i = 0; i <= mmu.max_page_level; i++) {
1765 1764                          if ((colors = hw_page_array[i].hp_colors) <= 1) {
1766 1765                                  colorequivszc[i] = 0;
1767 1766                                  continue;
1768 1767                          }
1769 1768                          while ((colors >> a) == 0)
1770 1769                                  a--;
1771 1770                          ASSERT(a >= 0);
1772 1771  
1773 1772                          /* higher 4 bits encodes color equiv mask */
1774 1773                          colorequivszc[i] = (a << 4);
1775 1774                  }
1776 1775          }
1777 1776  
1778 1777          /* factor in colorequiv to check additional 'equivalent' bins. */
1779 1778          if (colorequiv > 1) {
1780 1779  
1781 1780                  int a = lowbit(colorequiv) - 1;
1782 1781                  if (a > 15)
1783 1782                          a = 15;
1784 1783  
1785 1784                  for (i = 0; i <= mmu.max_page_level; i++) {
1786 1785                          if ((colors = hw_page_array[i].hp_colors) <= 1) {
1787 1786                                  continue;
1788 1787                          }
1789 1788                          while ((colors >> a) == 0)
1790 1789                                  a--;
1791 1790                          if ((a << 4) > colorequivszc[i]) {
1792 1791                                  colorequivszc[i] = (a << 4);
1793 1792                          }
1794 1793                  }
1795 1794          }
1796 1795  
1797 1796          /* size for mnoderanges */
1798 1797          for (mnoderangecnt = 0, i = 0; i < max_mem_nodes; i++)
1799 1798                  mnoderangecnt += mnode_range_cnt(i);
1800 1799          if (plat_dr_support_memory()) {
1801 1800                  /*
1802 1801                   * Reserve enough space for memory DR operations.
1803 1802                   * Two extra mnoderanges for possible fragmentation,
1804 1803                   * one for the 2G boundary and the other for the 4G boundary.
1805 1804                   * We don't expect a memory board crossing the 16M boundary
1806 1805                   * for memory hot-add operations on x86 platforms.
1807 1806                   */
1808 1807                  mnoderangecnt += 2 + max_mem_nodes - lgrp_plat_node_cnt;
1809 1808          }
1810 1809          colorsz = mnoderangecnt * sizeof (mnoderange_t);
1811 1810  
1812 1811          /* size for fpc_mutex and cpc_mutex */
1813 1812          colorsz += (2 * max_mem_nodes * sizeof (kmutex_t) * NPC_MUTEX);
1814 1813  
1815 1814          /* size of page_freelists */
1816 1815          colorsz += mnoderangecnt * sizeof (page_t ***);
1817 1816          colorsz += mnoderangecnt * mmu_page_sizes * sizeof (page_t **);
1818 1817  
1819 1818          for (i = 0; i < mmu_page_sizes; i++) {
1820 1819                  colors = page_get_pagecolors(i);
1821 1820                  colorsz += mnoderangecnt * colors * sizeof (page_t *);
1822 1821          }
1823 1822  
1824 1823          /* size of page_cachelists */
1825 1824          colorsz += mnoderangecnt * sizeof (page_t **);
1826 1825          colorsz += mnoderangecnt * page_colors * sizeof (page_t *);
1827 1826  
1828 1827          return (colorsz);
1829 1828  }
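
The color arithmetic above boils down to one color per way-sized slice of the
L2 (l2_sz / (l2_assoc * MMU_PAGESIZE)), raised to at least PAGE_COLORS_MIN,
with larger page sizes getting proportionally fewer colors through the
shifted mask. A small worked example, assuming a 1MB 8-way L2 and 4K base
pages (the cache parameters are made up for illustration):

#include <stdio.h>

#define PAGESIZE        4096
#define PAGE_COLORS_MIN 16

/*
 * Reproduce the color arithmetic from page_coloring_init() for a
 * hypothetical 1MB, 8-way L2 cache: one color per way-sized stripe of
 * pages, but never fewer than PAGE_COLORS_MIN bins.
 */
int
main(void)
{
        unsigned int l2_sz = 1024 * 1024;
        unsigned int l2_assoc = 8;
        unsigned int l2_colors, page_colors, mask;

        l2_colors = l2_assoc ? l2_sz / (l2_assoc * PAGESIZE) : 1;
        page_colors = l2_colors > PAGE_COLORS_MIN ? l2_colors :
            PAGE_COLORS_MIN;
        mask = page_colors - 1;

        printf("l2_colors=%u page_colors=%u\n", l2_colors, page_colors);
        /* colors shrink by the page-size ratio for larger page sizes */
        printf("colors at 2M pages: %u\n", (mask >> 9) + 1);
        return (0);
}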
1830 1829  
1831 1830  /*
1832 1831   * Called once at startup to configure page_coloring data structures and
1833 1832   * do the 1st page_free()/page_freelist_add().
1834 1833   */
1835 1834  void
1836 1835  page_coloring_setup(caddr_t pcmemaddr)
1837 1836  {
1838 1837          int     i;
1839 1838          int     j;
1840 1839          int     k;
1841 1840          caddr_t addr;
1842 1841          int     colors;
1843 1842  
1844 1843          /*
1845 1844           * do page coloring setup
1846 1845           */
1847 1846          addr = pcmemaddr;
1848 1847  
1849 1848          mnoderanges = (mnoderange_t *)addr;
1850 1849          addr += (mnoderangecnt * sizeof (mnoderange_t));
1851 1850  
1852 1851          mnode_range_setup(mnoderanges);
1853 1852  
1854 1853          if (physmax4g)
1855 1854                  mtype4g = pfn_2_mtype(0xfffff);
1856 1855  
1857 1856          for (k = 0; k < NPC_MUTEX; k++) {
1858 1857                  fpc_mutex[k] = (kmutex_t *)addr;
1859 1858                  addr += (max_mem_nodes * sizeof (kmutex_t));
1860 1859          }
1861 1860          for (k = 0; k < NPC_MUTEX; k++) {
1862 1861                  cpc_mutex[k] = (kmutex_t *)addr;
1863 1862                  addr += (max_mem_nodes * sizeof (kmutex_t));
1864 1863          }
1865 1864          page_freelists = (page_t ****)addr;
1866 1865          addr += (mnoderangecnt * sizeof (page_t ***));
1867 1866  
1868 1867          page_cachelists = (page_t ***)addr;
1869 1868          addr += (mnoderangecnt * sizeof (page_t **));
1870 1869  
1871 1870          for (i = 0; i < mnoderangecnt; i++) {
1872 1871                  page_freelists[i] = (page_t ***)addr;
1873 1872                  addr += (mmu_page_sizes * sizeof (page_t **));
1874 1873  
1875 1874                  for (j = 0; j < mmu_page_sizes; j++) {
1876 1875                          colors = page_get_pagecolors(j);
1877 1876                          page_freelists[i][j] = (page_t **)addr;
1878 1877                          addr += (colors * sizeof (page_t *));
1879 1878                  }
1880 1879                  page_cachelists[i] = (page_t **)addr;
1881 1880                  addr += (page_colors * sizeof (page_t *));
1882 1881          }
1883 1882  }
1884 1883  
1885 1884  #if defined(__xpv)
1886 1885  /*
1887 1886   * Give back 10% of the io_pool pages to the free list.
1888 1887   * Don't shrink the pool below some absolute minimum.
1889 1888   */
1890 1889  static void
1891 1890  page_io_pool_shrink()
1892 1891  {
1893 1892          int retcnt;
1894 1893          page_t *pp, *pp_first, *pp_last, **curpool;
1895 1894          mfn_t mfn;
1896 1895          int bothpools = 0;
1897 1896  
1898 1897          mutex_enter(&io_pool_lock);
1899 1898          io_pool_shrink_attempts++;      /* should be a kstat? */
1900 1899          retcnt = io_pool_cnt / 10;
1901 1900          if (io_pool_cnt - retcnt < io_pool_cnt_min)
1902 1901                  retcnt = io_pool_cnt - io_pool_cnt_min;
1903 1902          if (retcnt <= 0)
1904 1903                  goto done;
1905 1904          io_pool_shrinks++;      /* should be a kstat? */
1906 1905          curpool = &io_pool_4g;
1907 1906  domore:
1908 1907          /*
1909 1908           * Loop through taking pages from the end of the list
1910 1909           * (highest mfns) until the amount to return is reached.
1911 1910           */
1912 1911          for (pp = *curpool; pp && retcnt > 0; ) {
1913 1912                  pp_first = pp_last = pp->p_prev;
1914 1913                  if (pp_first == *curpool)
1915 1914                          break;
1916 1915                  retcnt--;
1917 1916                  io_pool_cnt--;
1918 1917                  page_io_pool_sub(curpool, pp_first, pp_last);
1919 1918                  if ((mfn = pfn_to_mfn(pp->p_pagenum)) < start_mfn)
1920 1919                          start_mfn = mfn;
1921 1920                  page_free(pp_first, 1);
1922 1921                  pp = *curpool;
1923 1922          }
1924 1923          if (retcnt != 0 && !bothpools) {
1925 1924                  /*
1926 1925                   * If not enough found in the less constrained pool, try the
1927 1926                   * more constrained one.
1928 1927                   */
1929 1928                  curpool = &io_pool_16m;
1930 1929                  bothpools = 1;
1931 1930                  goto domore;
1932 1931          }
1933 1932  done:
1934 1933          mutex_exit(&io_pool_lock);
1935 1934  }
1936 1935  
1937 1936  #endif  /* __xpv */
1938 1937  
1939 1938  uint_t
1940 1939  page_create_update_flags_x86(uint_t flags)
1941 1940  {
1942 1941  #if defined(__xpv)
1943 1942          /*
1944 1943           * Check if this is an urgent allocation and free pages are depleted.
1945 1944           */
1946 1945          if (!(flags & PG_WAIT) && freemem < desfree)
1947 1946                  page_io_pool_shrink();
1948 1947  #else /* !__xpv */
1949 1948          /*
1950 1949           * page_create_get_something may call this because 4g memory may be
1951 1950           * depleted. Set flags to allow for relocation of base page below
1952 1951           * 4g if necessary.
1953 1952           */
1954 1953          if (physmax4g)
1955 1954                  flags |= (PGI_PGCPSZC0 | PGI_PGCPHIPRI);
1956 1955  #endif /* __xpv */
1957 1956          return (flags);
1958 1957  }
1959 1958  
1960 1959  /*ARGSUSED*/
1961 1960  int
1962 1961  bp_color(struct buf *bp)
1963 1962  {
1964 1963          return (0);
1965 1964  }
1966 1965  
1967 1966  #if defined(__xpv)
1968 1967  
1969 1968  /*
1970 1969   * Take pages out of an io_pool
1971 1970   */
1972 1971  static void
1973 1972  page_io_pool_sub(page_t **poolp, page_t *pp_first, page_t *pp_last)
1974 1973  {
1975 1974          if (*poolp == pp_first) {
1976 1975                  *poolp = pp_last->p_next;
1977 1976                  if (*poolp == pp_first)
1978 1977                          *poolp = NULL;
1979 1978          }
1980 1979          pp_first->p_prev->p_next = pp_last->p_next;
1981 1980          pp_last->p_next->p_prev = pp_first->p_prev;
1982 1981          pp_first->p_prev = pp_last;
1983 1982          pp_last->p_next = pp_first;
1984 1983  }
1985 1984  
1986 1985  /*
1987 1986   * Put a page on the io_pool list. The list is ordered by increasing MFN.
1988 1987   */
1989 1988  static void
1990 1989  page_io_pool_add(page_t **poolp, page_t *pp)
1991 1990  {
1992 1991          page_t  *look;
1993 1992          mfn_t   mfn = mfn_list[pp->p_pagenum];
1994 1993  
1995 1994          if (*poolp == NULL) {
1996 1995                  *poolp = pp;
1997 1996                  pp->p_next = pp;
1998 1997                  pp->p_prev = pp;
1999 1998                  return;
2000 1999          }
2001 2000  
2002 2001          /*
2003 2002           * Since we try to take pages from the high end of the pool,
2004 2003           * chances are good that the pages to be put on the list will
2005 2004           * go at or near the end of the list, so start at the end and
2006 2005           * work backwards.
2007 2006           */
2008 2007          look = (*poolp)->p_prev;
2009 2008          while (mfn < mfn_list[look->p_pagenum]) {
2010 2009                  look = look->p_prev;
2011 2010                  if (look == (*poolp)->p_prev)
2012 2011                          break; /* backed all the way to front of list */
2013 2012          }
2014 2013  
2015 2014          /* insert after look */
2016 2015          pp->p_prev = look;
2017 2016          pp->p_next = look->p_next;
2018 2017          pp->p_next->p_prev = pp;
2019 2018          look->p_next = pp;
2020 2019          if (mfn < mfn_list[(*poolp)->p_pagenum]) {
2021 2020                  /*
2022 2021                   * We inserted a new first list element;
2023 2022                   * adjust the pool pointer to the newly inserted element.
2024 2023                   */
2025 2024                  *poolp = pp;
2026 2025          }
2027 2026  }
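
page_io_pool_add() keeps a circular doubly linked list ordered by increasing
mfn and searches backwards from the tail because new pages usually belong
near the end. The same insertion is sketched below on a plain circular list
keyed by an integer; node_t, the helper name, and the sample keys are
assumptions:

#include <stdio.h>
#include <stdlib.h>

typedef struct node {
        struct node *next, *prev;
        unsigned long key;      /* stand-in for the page's mfn */
} node_t;

/*
 * Insert np into the circular, ascending-by-key list headed by *headp,
 * scanning backwards from the tail the way page_io_pool_add() does.
 */
static void
list_add_sorted(node_t **headp, node_t *np)
{
        node_t *look;

        if (*headp == NULL) {
                *headp = np->next = np->prev = np;
                return;
        }
        look = (*headp)->prev;                  /* tail = highest key */
        while (np->key < look->key) {
                look = look->prev;
                if (look == (*headp)->prev)
                        break;                  /* wrapped to the front */
        }
        np->prev = look;
        np->next = look->next;
        np->next->prev = np;
        look->next = np;
        if (np->key < (*headp)->key)
                *headp = np;                    /* new lowest key */
}

int
main(void)
{
        node_t *head = NULL, *n;
        unsigned long keys[] = { 40, 10, 30, 20 };

        for (int i = 0; i < 4; i++) {
                n = calloc(1, sizeof (*n));     /* no error checks in sketch */
                n->key = keys[i];
                list_add_sorted(&head, n);
        }
        n = head;
        do {
                printf("%lu ", n->key);         /* 10 20 30 40 */
                n = n->next;
        } while (n != head);
        printf("\n");
        return (0);
}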
2028 2027  
2029 2028  /*
2030 2029   * Add a page to the io_pool.  Setting the force flag will force the page
2031 2030   * into the io_pool no matter what.
2032 2031   */
2033 2032  static void
2034 2033  add_page_to_pool(page_t *pp, int force)
2035 2034  {
2036 2035          page_t *highest;
2037 2036          page_t *freep = NULL;
2038 2037  
2039 2038          mutex_enter(&io_pool_lock);
2040 2039          /*
2041 2040           * Always keep the scarce low memory pages
2042 2041           */
2043 2042          if (mfn_list[pp->p_pagenum] < PFN_16MEG) {
2044 2043                  ++io_pool_cnt;
2045 2044                  page_io_pool_add(&io_pool_16m, pp);
2046 2045                  goto done;
2047 2046          }
2048 2047          if (io_pool_cnt < io_pool_cnt_max || force || io_pool_4g == NULL) {
2049 2048                  ++io_pool_cnt;
2050 2049                  page_io_pool_add(&io_pool_4g, pp);
2051 2050          } else {
2052 2051                  highest = io_pool_4g->p_prev;
2053 2052                  if (mfn_list[pp->p_pagenum] < mfn_list[highest->p_pagenum]) {
2054 2053                          page_io_pool_sub(&io_pool_4g, highest, highest);
2055 2054                          page_io_pool_add(&io_pool_4g, pp);
2056 2055                          freep = highest;
2057 2056                  } else {
2058 2057                          freep = pp;
2059 2058                  }
2060 2059          }
2061 2060  done:
2062 2061          mutex_exit(&io_pool_lock);
2063 2062          if (freep)
2064 2063                  page_free(freep, 1);
2065 2064  }
2066 2065  
2067 2066  
2068 2067  int contig_pfn_cnt;     /* no of pfns in the contig pfn list */
2069 2068  int contig_pfn_max;     /* capacity of the contig pfn list */
2070 2069  int next_alloc_pfn;     /* next position in list to start a contig search */
2071 2070  int contig_pfnlist_updates;     /* pfn list update count */
2072 2071  int contig_pfnlist_builds;      /* how many times have we (re)built list */
2073 2072  int contig_pfnlist_buildfailed; /* how many times has list build failed */
2074 2073  int create_contig_pending;      /* nonzero means taskq creating contig list */
2075 2074  pfn_t *contig_pfn_list = NULL;  /* list of contig pfns in ascending mfn order */
2076 2075  
2077 2076  /*
2078 2077   * Function to use in sorting a list of pfns by their underlying mfns.
2079 2078   */
2080 2079  static int
2081 2080  mfn_compare(const void *pfnp1, const void *pfnp2)
2082 2081  {
2083 2082          mfn_t mfn1 = mfn_list[*(pfn_t *)pfnp1];
2084 2083          mfn_t mfn2 = mfn_list[*(pfn_t *)pfnp2];
2085 2084  
2086 2085          if (mfn1 > mfn2)
2087 2086                  return (1);
2088 2087          if (mfn1 < mfn2)
2089 2088                  return (-1);
2090 2089          return (0);
2091 2090  }
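
mfn_compare() is the comparator handed to qsort() so that the pfn list comes
out ordered by each pfn's underlying mfn; returning an explicit -1/0/1 also
sidesteps the truncation problems a subtraction-based comparator can have
with wide unsigned values. A minimal userland analogue that sorts indices by
the values they map to (the map[] table is invented):

#include <stdio.h>
#include <stdlib.h>

/* Hypothetical pfn -> mfn style mapping table. */
static const unsigned long map[] = { 7, 3, 9, 1, 5 };

/* Order two indices by the values they map to, as mfn_compare() does. */
static int
map_compare(const void *a, const void *b)
{
        unsigned long v1 = map[*(const int *)a];
        unsigned long v2 = map[*(const int *)b];

        if (v1 > v2)
                return (1);
        if (v1 < v2)
                return (-1);
        return (0);
}

int
main(void)
{
        int idx[] = { 0, 1, 2, 3, 4 };

        qsort(idx, 5, sizeof (idx[0]), map_compare);
        for (int i = 0; i < 5; i++)
                printf("idx %d -> %lu\n", idx[i], map[idx[i]]);
        return (0);
}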
2092 2091  
2093 2092  /*
2094 2093   * Compact the contig_pfn_list by tossing all the non-contiguous
2095 2094   * elements from the list.
2096 2095   */
2097 2096  static void
2098 2097  compact_contig_pfn_list(void)
2099 2098  {
2100 2099          pfn_t pfn, lapfn, prev_lapfn;
2101 2100          mfn_t mfn;
2102 2101          int i, newcnt = 0;
2103 2102  
2104 2103          prev_lapfn = 0;
2105 2104          for (i = 0; i < contig_pfn_cnt - 1; i++) {
2106 2105                  pfn = contig_pfn_list[i];
2107 2106                  lapfn = contig_pfn_list[i + 1];
2108 2107                  mfn = mfn_list[pfn];
2109 2108                  /*
2110 2109                   * See if next pfn is for a contig mfn
2111 2110                   */
2112 2111                  if (mfn_list[lapfn] != mfn + 1)
2113 2112                          continue;
2114 2113                  /*
2115 2114                   * pfn and lookahead are both put in list
2116 2115                   * unless pfn is the previous lookahead.
2117 2116                   */
2118 2117                  if (pfn != prev_lapfn)
2119 2118                          contig_pfn_list[newcnt++] = pfn;
2120 2119                  contig_pfn_list[newcnt++] = lapfn;
2121 2120                  prev_lapfn = lapfn;
2122 2121          }
2123 2122          for (i = newcnt; i < contig_pfn_cnt; i++)
2124 2123                  contig_pfn_list[i] = 0;
2125 2124          contig_pfn_cnt = newcnt;
2126 2125  }
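
compact_contig_pfn_list() keeps only entries whose successor in the sorted
list maps to the next mfn, and it is careful not to store a pfn twice when it
is both the previous lookahead and the current element. A standalone sketch
of that in-place compaction over an integer mapping follows (arrays and names
are illustrative):

#include <stdio.h>

/*
 * Keep only list entries that are part of an adjacent-value pair in
 * map[], mirroring compact_contig_pfn_list().  Returns the new count.
 */
static int
compact_adjacent(int *list, int cnt, const unsigned long *map)
{
        int i, newcnt = 0, prev_la = -1;

        for (i = 0; i < cnt - 1; i++) {
                int cur = list[i], la = list[i + 1];

                if (map[la] != map[cur] + 1)
                        continue;               /* not adjacent, drop cur */
                if (cur != prev_la)             /* avoid storing cur twice */
                        list[newcnt++] = cur;
                list[newcnt++] = la;
                prev_la = la;
        }
        return (newcnt);
}

int
main(void)
{
        /* map is indexed by "pfn"; the values play the role of mfns. */
        unsigned long map[] = { 10, 11, 12, 20, 30, 31 };
        int list[] = { 0, 1, 2, 3, 4, 5 };      /* sorted by map value */
        int n = compact_adjacent(list, 6, map);

        for (int i = 0; i < n; i++)
                printf("%d ", list[i]);         /* 0 1 2 4 5 */
        printf("\n");
        return (0);
}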
2127 2126  
2128 2127  /*ARGSUSED*/
2129 2128  static void
2130 2129  call_create_contiglist(void *arg)
2131 2130  {
2132 2131          (void) create_contig_pfnlist(PG_WAIT);
2133 2132  }
2134 2133  
2135 2134  /*
2136 2135   * Create list of freelist pfns that have underlying
2137 2136   * contiguous mfns.  The list is kept in ascending mfn order.
2138 2137   * Returns 1 if the list was created, else 0.
2139 2138   */
2140 2139  static int
2141 2140  create_contig_pfnlist(uint_t flags)
2142 2141  {
2143 2142          pfn_t pfn;
2144 2143          page_t *pp;
2145 2144          int ret = 1;
2146 2145  
2147 2146          mutex_enter(&contig_list_lock);
2148 2147          if (contig_pfn_list != NULL)
2149 2148                  goto out;
2150 2149          contig_pfn_max = freemem + (freemem / 10);
2151 2150          contig_pfn_list = kmem_zalloc(contig_pfn_max * sizeof (pfn_t),
2152 2151              (flags & PG_WAIT) ? KM_SLEEP : KM_NOSLEEP);
2153 2152          if (contig_pfn_list == NULL) {
2154 2153                  /*
2155 2154                   * If we could not create the contig list (because
2156 2155                   * we could not sleep for memory), dispatch a taskq that can
2157 2156                   * sleep to get the memory.
2158 2157                   */
2159 2158                  if (!create_contig_pending) {
2160 2159                          if (taskq_dispatch(system_taskq, call_create_contiglist,
2161 2160                              NULL, TQ_NOSLEEP) != NULL)
2162 2161                                  create_contig_pending = 1;
2163 2162                  }
2164 2163                  contig_pfnlist_buildfailed++;   /* count list build failures */
2165 2164                  ret = 0;
2166 2165                  goto out;
2167 2166          }
2168 2167          create_contig_pending = 0;
2169 2168          ASSERT(contig_pfn_cnt == 0);
2170 2169          for (pfn = 0; pfn < mfn_count; pfn++) {
2171 2170                  pp = page_numtopp_nolock(pfn);
2172 2171                  if (pp == NULL || !PP_ISFREE(pp))
2173 2172                          continue;
2174 2173                  contig_pfn_list[contig_pfn_cnt] = pfn;
2175 2174                  if (++contig_pfn_cnt == contig_pfn_max)
2176 2175                          break;
2177 2176          }
2178 2177          /*
2179 2178           * Sanity check the new list.
2180 2179           */
2181 2180          if (contig_pfn_cnt < 2) { /* no contig pfns */
2182 2181                  contig_pfn_cnt = 0;
2183 2182                  contig_pfnlist_buildfailed++;
2184 2183                  kmem_free(contig_pfn_list, contig_pfn_max * sizeof (pfn_t));
2185 2184                  contig_pfn_list = NULL;
2186 2185                  contig_pfn_max = 0;
2187 2186                  ret = 0;
2188 2187                  goto out;
2189 2188          }
2190 2189          qsort(contig_pfn_list, contig_pfn_cnt, sizeof (pfn_t), mfn_compare);
2191 2190          compact_contig_pfn_list();
2192 2191          /*
2193 2192           * Make sure next search of the newly created contiguous pfn
2194 2193           * list starts at the beginning of the list.
2195 2194           */
2196 2195          next_alloc_pfn = 0;
2197 2196          contig_pfnlist_builds++;        /* count list builds */
2198 2197  out:
2199 2198          mutex_exit(&contig_list_lock);
2200 2199          return (ret);
2201 2200  }
2202 2201  
2203 2202  
2204 2203  /*
2205 2204   * Toss the current contig pfnlist.  Someone is about to do a massive
2206 2205   * update to pfn<->mfn mappings.  So we have them destroy the list and lock
2207 2206   * it till they are done with their update.
2208 2207   */
2209 2208  void
2210 2209  clear_and_lock_contig_pfnlist()
2211 2210  {
2212 2211          pfn_t *listp = NULL;
2213 2212          size_t listsize;
2214 2213  
2215 2214          mutex_enter(&contig_list_lock);
2216 2215          if (contig_pfn_list != NULL) {
2217 2216                  listp = contig_pfn_list;
2218 2217                  listsize = contig_pfn_max * sizeof (pfn_t);
2219 2218                  contig_pfn_list = NULL;
2220 2219                  contig_pfn_max = contig_pfn_cnt = 0;
2221 2220          }
2222 2221          if (listp != NULL)
2223 2222                  kmem_free(listp, listsize);
2224 2223  }
2225 2224  
2226 2225  /*
2227 2226   * Unlock the contig_pfn_list.  The next attempted use of it will cause
2228 2227   * it to be re-created.
2229 2228   */
2230 2229  void
2231 2230  unlock_contig_pfnlist()
2232 2231  {
2233 2232          mutex_exit(&contig_list_lock);
2234 2233  }
2235 2234  
2236 2235  /*
2237 2236   * Update the contiguous pfn list in response to a pfn <-> mfn reassignment
2238 2237   */
2239 2238  void
2240 2239  update_contig_pfnlist(pfn_t pfn, mfn_t oldmfn, mfn_t newmfn)
2241 2240  {
2242 2241          int probe_hi, probe_lo, probe_pos, insert_after, insert_point;
2243 2242          pfn_t probe_pfn;
2244 2243          mfn_t probe_mfn;
2245 2244          int drop_lock = 0;
2246 2245  
2247 2246          if (mutex_owner(&contig_list_lock) != curthread) {
2248 2247                  drop_lock = 1;
2249 2248                  mutex_enter(&contig_list_lock);
2250 2249          }
2251 2250          if (contig_pfn_list == NULL)
2252 2251                  goto done;
2253 2252          contig_pfnlist_updates++;
2254 2253          /*
2255 2254           * Find the pfn in the current list.  Use a binary chop to locate it.
2256 2255           */
2257 2256          probe_hi = contig_pfn_cnt - 1;
2258 2257          probe_lo = 0;
2259 2258          probe_pos = (probe_hi + probe_lo) / 2;
2260 2259          while ((probe_pfn = contig_pfn_list[probe_pos]) != pfn) {
2261 2260                  if (probe_pos == probe_lo) { /* pfn not in list */
2262 2261                          probe_pos = -1;
2263 2262                          break;
2264 2263                  }
2265 2264                  if (pfn_to_mfn(probe_pfn) <= oldmfn)
2266 2265                          probe_lo = probe_pos;
2267 2266                  else
2268 2267                          probe_hi = probe_pos;
2269 2268                  probe_pos = (probe_hi + probe_lo) / 2;
2270 2269          }
2271 2270          if (probe_pos >= 0) {
2272 2271                  /*
2273 2272                   * Remove pfn from list and ensure next alloc
2274 2273                   * position stays in bounds.
2275 2274                   */
2276 2275                  if (--contig_pfn_cnt <= next_alloc_pfn)
2277 2276                          next_alloc_pfn = 0;
2278 2277                  if (contig_pfn_cnt < 2) { /* no contig pfns */
2279 2278                          contig_pfn_cnt = 0;
2280 2279                          kmem_free(contig_pfn_list,
2281 2280                              contig_pfn_max * sizeof (pfn_t));
2282 2281                          contig_pfn_list = NULL;
2283 2282                          contig_pfn_max = 0;
2284 2283                          goto done;
2285 2284                  }
2286 2285                  ovbcopy(&contig_pfn_list[probe_pos + 1],
2287 2286                      &contig_pfn_list[probe_pos],
2288 2287                      (contig_pfn_cnt - probe_pos) * sizeof (pfn_t));
2289 2288          }
2290 2289          if (newmfn == MFN_INVALID)
2291 2290                  goto done;
2292 2291          /*
2293 2292           * Check if new mfn has adjacent mfns in the list
2294 2293           */
2295 2294          probe_hi = contig_pfn_cnt - 1;
2296 2295          probe_lo = 0;
2297 2296          insert_after = -2;
2298 2297          do {
2299 2298                  probe_pos = (probe_hi + probe_lo) / 2;
2300 2299                  probe_mfn = pfn_to_mfn(contig_pfn_list[probe_pos]);
2301 2300                  if (newmfn == probe_mfn + 1)
2302 2301                          insert_after = probe_pos;
2303 2302                  else if (newmfn == probe_mfn - 1)
2304 2303                          insert_after = probe_pos - 1;
2305 2304                  if (probe_pos == probe_lo)
2306 2305                          break;
2307 2306                  if (probe_mfn <= newmfn)
2308 2307                          probe_lo = probe_pos;
2309 2308                  else
2310 2309                          probe_hi = probe_pos;
2311 2310          } while (insert_after == -2);
2312 2311          /*
2313 2312           * If there is space in the list and there are adjacent mfns,
2314 2313           * insert the pfn into its proper place in the list.
2315 2314           */
2316 2315          if (insert_after != -2 && contig_pfn_cnt + 1 <= contig_pfn_max) {
2317 2316                  insert_point = insert_after + 1;
2318 2317                  ovbcopy(&contig_pfn_list[insert_point],
2319 2318                      &contig_pfn_list[insert_point + 1],
2320 2319                      (contig_pfn_cnt - insert_point) * sizeof (pfn_t));
2321 2320                  contig_pfn_list[insert_point] = pfn;
2322 2321                  contig_pfn_cnt++;
2323 2322          }
2324 2323  done:
2325 2324          if (drop_lock)
2326 2325                  mutex_exit(&contig_list_lock);
2327 2326  }
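
Both lookups in update_contig_pfnlist() are bounded binary chops over a list
kept sorted by mfn: the first, keyed on oldmfn, finds the pfn to remove; the
second, keyed on newmfn, looks for a neighbor to insert next to, with -2 as
the "no slot found yet" sentinel. A condensed sketch of the second search
over a plain sorted array (hypothetical values):

#include <stdio.h>

/*
 * Binary chop over a sorted array looking for a slot adjacent to val,
 * in the style of the insert_after search in update_contig_pfnlist().
 * Returns the index to insert after, or -2 if val has no neighbor.
 */
static int
find_adjacent_slot(const unsigned long *a, int cnt, unsigned long val)
{
        int lo = 0, hi = cnt - 1, pos, after = -2;

        do {
                pos = (lo + hi) / 2;
                if (val == a[pos] + 1)
                        after = pos;
                else if (val == a[pos] - 1)
                        after = pos - 1;
                if (pos == lo)
                        break;
                if (a[pos] <= val)
                        lo = pos;
                else
                        hi = pos;
        } while (after == -2);
        return (after);
}

int
main(void)
{
        unsigned long a[] = { 10, 11, 12, 20, 30, 31 };

        printf("%d\n", find_adjacent_slot(a, 6, 13));   /* 2: after value 12 */
        printf("%d\n", find_adjacent_slot(a, 6, 19));   /* 2: just before 20 */
        printf("%d\n", find_adjacent_slot(a, 6, 25));   /* -2: no neighbor */
        return (0);
}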
2328 2327  
2329 2328  /*
2330 2329   * Called to (re-)populate the io_pool from the free page lists.
2331 2330   */
2332 2331  long
2333 2332  populate_io_pool(void)
2334 2333  {
2335 2334          pfn_t pfn;
2336 2335          mfn_t mfn, max_mfn;
2337 2336          page_t *pp;
2338 2337  
2339 2338          /*
2340 2339           * Figure out the bounds of the pool on first invocation.
2341 2340           * We use a percentage of memory for the io pool size.
2342 2341           * We allow that to shrink, but not to less than a fixed minimum.
2343 2342           */
2344 2343          if (io_pool_cnt_max == 0) {
2345 2344                  io_pool_cnt_max = physmem / (100 / io_pool_physmem_pct);
2346 2345                  io_pool_cnt_lowater = io_pool_cnt_max;
2347 2346                  /*
2348 2347                   * This is the first time in populate_io_pool, grab a va to use
2349 2348                   * when we need to allocate pages.
2350 2349                   */
2351 2350                  io_pool_kva = vmem_alloc(heap_arena, PAGESIZE, VM_SLEEP);
2352 2351          }
2353 2352          /*
2354 2353           * If we are out of pages in the pool, then grow the size of the pool
2355 2354           */
2356 2355          if (io_pool_cnt == 0) {
2357 2356                  /*
2358 2357                   * Grow the max size of the io pool by 5%, but never more than
2359 2358                   * 25% of physical memory.
2360 2359                   */
2361 2360                  if (io_pool_cnt_max < physmem / 4)
2362 2361                          io_pool_cnt_max += io_pool_cnt_max / 20;
2363 2362          }
2364 2363          io_pool_grows++;        /* should be a kstat? */
2365 2364  
2366 2365          /*
2367 2366           * Get highest mfn on this platform, but limit to the 32 bit DMA max.
2368 2367           */
2369 2368          (void) mfn_to_pfn(start_mfn);
2370 2369          max_mfn = MIN(cached_max_mfn, PFN_4GIG);
2371 2370          for (mfn = start_mfn; mfn < max_mfn; start_mfn = ++mfn) {
2372 2371                  pfn = mfn_to_pfn(mfn);
2373 2372                  if (pfn & PFN_IS_FOREIGN_MFN)
2374 2373                          continue;
2375 2374                  /*
2376 2375                   * try to allocate it from free pages
2377 2376                   */
2378 2377                  pp = page_numtopp_alloc(pfn);
2379 2378                  if (pp == NULL)
2380 2379                          continue;
2381 2380                  PP_CLRFREE(pp);
2382 2381                  add_page_to_pool(pp, 1);
2383 2382                  if (io_pool_cnt >= io_pool_cnt_max)
2384 2383                          break;
2385 2384          }
2386 2385  
2387 2386          return (io_pool_cnt);
2388 2387  }
2389 2388  
2390 2389  /*
2391 2390   * Destroy a page that was being used for DMA I/O. It may or
2392 2391   * may not actually go back to the io_pool.
2393 2392   */
2394 2393  void
2395 2394  page_destroy_io(page_t *pp)
2396 2395  {
2397 2396          mfn_t mfn = mfn_list[pp->p_pagenum];
2398 2397  
2399 2398          /*
2400 2399           * When the page was alloc'd, a reservation was made; release it now.
2401 2400           */
2402 2401          page_unresv(1);
2403 2402          /*
2404 2403           * Unload translations, if any, then hash out the
2405 2404           * page to erase its identity.
2406 2405           */
2407 2406          (void) hat_pageunload(pp, HAT_FORCE_PGUNLOAD);
2408 2407          page_hashout(pp, NULL);
2409 2408  
2410 2409          /*
2411 2410           * If the page came from the free lists, just put it back on them.
2412 2411           * DomU pages always go on the free lists as well.
2413 2412           */
2414 2413          if (!DOMAIN_IS_INITDOMAIN(xen_info) || mfn >= PFN_4GIG) {
2415 2414                  page_free(pp, 1);
2416 2415                  return;
2417 2416          }
2418 2417  
2419 2418          add_page_to_pool(pp, 0);
2420 2419  }
2421 2420  
2422 2421  
2423 2422  long contig_searches;           /* count of times contig pages requested */
2424 2423  long contig_search_restarts;    /* count of contig ranges tried */
2425 2424  long contig_search_failed;      /* count of contig alloc failures */
2426 2425  
2427 2426  /*
2428 2427   * Free partial page list
2429 2428   */
2430 2429  static void
2431 2430  free_partial_list(page_t **pplist)
2432 2431  {
2433 2432          page_t *pp;
2434 2433  
2435 2434          while (*pplist != NULL) {
2436 2435                  pp = *pplist;
2437 2436                  page_io_pool_sub(pplist, pp, pp);
2438 2437                  page_free(pp, 1);
2439 2438          }
2440 2439  }
2441 2440  
2442 2441  /*
2443 2442   * Look thru the contiguous pfns that are not part of the io_pool for
2444 2443   * contiguous free pages.  Return a list of the found pages or NULL.
2445 2444   */
2446 2445  page_t *
2447 2446  find_contig_free(uint_t npages, uint_t flags, uint64_t pfnseg,
2448 2447      pgcnt_t pfnalign)
2449 2448  {
2450 2449          page_t *pp, *plist = NULL;
2451 2450          mfn_t mfn, prev_mfn, start_mfn;
2452 2451          pfn_t pfn;
2453 2452          int pages_needed, pages_requested;
2454 2453          int search_start;
2455 2454  
2456 2455          /*
2457 2456           * create the contig pfn list if not already done
2458 2457           */
2459 2458  retry:
2460 2459          mutex_enter(&contig_list_lock);
2461 2460          if (contig_pfn_list == NULL) {
2462 2461                  mutex_exit(&contig_list_lock);
2463 2462                  if (!create_contig_pfnlist(flags)) {
2464 2463                          return (NULL);
2465 2464                  }
2466 2465                  goto retry;
2467 2466          }
2468 2467          contig_searches++;
2469 2468          /*
2470 2469           * Search contiguous pfn list for physically contiguous pages not in
2471 2470           * the io_pool.  Start the search where the last search left off.
2472 2471           */
2473 2472          pages_requested = pages_needed = npages;
2474 2473          search_start = next_alloc_pfn;
2475 2474          start_mfn = prev_mfn = 0;
2476 2475          while (pages_needed) {
2477 2476                  pfn = contig_pfn_list[next_alloc_pfn];
2478 2477                  mfn = pfn_to_mfn(pfn);
2479 2478                  /*
2480 2479                   * Check if this mfn is the first one or contiguous with the
2481 2480                   * previous one, if the page corresponding to the mfn is free,
2482 2481                   * and that the mfn range does not cross a segment boundary.
2483 2482                   */
2484 2483                  if ((prev_mfn == 0 || mfn == prev_mfn + 1) &&
2485 2484                      (pp = page_numtopp_alloc(pfn)) != NULL &&
2486 2485                      !((mfn & pfnseg) < (start_mfn & pfnseg))) {
2487 2486                          PP_CLRFREE(pp);
2488 2487                          page_io_pool_add(&plist, pp);
2489 2488                          pages_needed--;
2490 2489                          if (prev_mfn == 0) {
2491 2490                                  if (pfnalign &&
2492 2491                                      mfn != P2ROUNDUP(mfn, pfnalign)) {
2493 2492                                          /*
2494 2493                                           * not properly aligned
2495 2494                                           */
2496 2495                                          contig_search_restarts++;
2497 2496                                          free_partial_list(&plist);
2498 2497                                          pages_needed = pages_requested;
2499 2498                                          start_mfn = prev_mfn = 0;
2500 2499                                          goto skip;
2501 2500                                  }
2502 2501                                  start_mfn = mfn;
2503 2502                          }
2504 2503                          prev_mfn = mfn;
2505 2504                  } else {
2506 2505                          contig_search_restarts++;
2507 2506                          free_partial_list(&plist);
2508 2507                          pages_needed = pages_requested;
2509 2508                          start_mfn = prev_mfn = 0;
2510 2509                  }
2511 2510  skip:
2512 2511                  if (++next_alloc_pfn == contig_pfn_cnt)
2513 2512                          next_alloc_pfn = 0;
2514 2513                  if (next_alloc_pfn == search_start)
2515 2514                          break; /* all pfns searched */
2516 2515          }
2517 2516          mutex_exit(&contig_list_lock);
2518 2517          if (pages_needed) {
2519 2518                  contig_search_failed++;
2520 2519                  /*
2521 2520                   * Failed to find enough contig pages.
2522 2521                   * Free the partial page list.
2523 2522                   */
2524 2523                  free_partial_list(&plist);
2525 2524          }
2526 2525          return (plist);
2527 2526  }
2528 2527  
2529 2528  /*
2530 2529   * Search the reserved io pool pages for a page range with the
2531 2530   * desired characteristics.
2532 2531   */
2533 2532  page_t *
2534 2533  page_io_pool_alloc(ddi_dma_attr_t *mattr, int contig, pgcnt_t minctg)
2535 2534  {
2536 2535          page_t *pp_first, *pp_last;
2537 2536          page_t *pp, **poolp;
2538 2537          pgcnt_t nwanted, pfnalign;
2539 2538          uint64_t pfnseg;
2540 2539          mfn_t mfn, tmfn, hi_mfn, lo_mfn;
2541 2540          int align, attempt = 0;
2542 2541  
2543 2542          if (minctg == 1)
2544 2543                  contig = 0;
2545 2544          lo_mfn = mmu_btop(mattr->dma_attr_addr_lo);
2546 2545          hi_mfn = mmu_btop(mattr->dma_attr_addr_hi);
2547 2546          pfnseg = mmu_btop(mattr->dma_attr_seg);
2548 2547          align = maxbit(mattr->dma_attr_align, mattr->dma_attr_minxfer);
2549 2548          if (align > MMU_PAGESIZE)
2550 2549                  pfnalign = mmu_btop(align);
2551 2550          else
2552 2551                  pfnalign = 0;
2553 2552  
2554 2553  try_again:
2555 2554          /*
2556 2555           * See if we want pages for a legacy device
2557 2556           */
2558 2557          if (hi_mfn < PFN_16MEG)
2559 2558                  poolp = &io_pool_16m;
2560 2559          else
2561 2560                  poolp = &io_pool_4g;
2562 2561  try_smaller:
2563 2562          /*
2564 2563           * Take pages from I/O pool. We'll use pages from the highest
2565 2564           * MFN range possible.
2566 2565           */
2567 2566          pp_first = pp_last = NULL;
2568 2567          mutex_enter(&io_pool_lock);
2569 2568          nwanted = minctg;
2570 2569          for (pp = *poolp; pp && nwanted > 0; ) {
2571 2570                  pp = pp->p_prev;
2572 2571  
2573 2572                  /*
2574 2573                   * skip pages above allowable range
2575 2574                   */
2576 2575                  mfn = mfn_list[pp->p_pagenum];
2577 2576                  if (hi_mfn < mfn)
2578 2577                          goto skip;
2579 2578  
2580 2579                  /*
2581 2580                   * stop at pages below allowable range
2582 2581                   */
2583 2582                  if (lo_mfn > mfn)
2584 2583                          break;
2585 2584  restart:
2586 2585                  if (pp_last == NULL) {
2587 2586                          /*
2588 2587                           * Check alignment
2589 2588                           */
2590 2589                          tmfn = mfn - (minctg - 1);
2591 2590                          if (pfnalign && tmfn != P2ROUNDUP(tmfn, pfnalign))
2592 2591                                  goto skip; /* not properly aligned */
2593 2592                          /*
2594 2593                           * Check segment
2595 2594                           */
2596 2595                          if ((mfn & pfnseg) < (tmfn & pfnseg))
2597 2596                                  goto skip; /* crosses seg boundary */
2598 2597                          /*
2599 2598                           * Start building page list
2600 2599                           */
2601 2600                          pp_first = pp_last = pp;
2602 2601                          nwanted--;
2603 2602                  } else {
2604 2603                          /*
2605 2604                           * check physical contiguity if required
2606 2605                           */
2607 2606                          if (contig &&
2608 2607                              mfn_list[pp_first->p_pagenum] != mfn + 1) {
2609 2608                                  /*
2610 2609                                   * not a contiguous page, restart list.
2611 2610                                   */
2612 2611                                  pp_last = NULL;
2613 2612                                  nwanted = minctg;
2614 2613                                  goto restart;
2615 2614                          } else { /* add page to list */
2616 2615                                  pp_first = pp;
2617 2616                                  nwanted--;
2618 2617                          }
2619 2618                  }
2620 2619  skip:
2621 2620                  if (pp == *poolp)
2622 2621                          break;
2623 2622          }
2624 2623  
2625 2624          /*
2626 2625           * If we didn't find memory, try the more constrained pool, then
2627 2626           * sweep free pages into the DMA pool and try again.
2628 2627           */
2629 2628          if (nwanted != 0) {
2630 2629                  mutex_exit(&io_pool_lock);
2631 2630                  /*
2632 2631                   * If we were looking in the less constrained pool and
2633 2632                   * didn't find pages, try the more constrained pool.
2634 2633                   */
2635 2634                  if (poolp == &io_pool_4g) {
2636 2635                          poolp = &io_pool_16m;
2637 2636                          goto try_smaller;
2638 2637                  }
2639 2638                  kmem_reap();
2640 2639                  if (++attempt < 4) {
2641 2640                          /*
2642 2641                           * Grab some more io_pool pages
2643 2642                           */
2644 2643                          (void) populate_io_pool();
2645 2644                          goto try_again; /* go around and retry */
2646 2645                  }
2647 2646                  return (NULL);
2648 2647          }
2649 2648          /*
2650 2649           * Found the pages, now snip them from the list
2651 2650           */
2652 2651          page_io_pool_sub(poolp, pp_first, pp_last);
2653 2652          io_pool_cnt -= minctg;
2654 2653          /*
2655 2654           * reset low water mark
2656 2655           */
2657 2656          if (io_pool_cnt < io_pool_cnt_lowater)
2658 2657                  io_pool_cnt_lowater = io_pool_cnt;
2659 2658          mutex_exit(&io_pool_lock);
2660 2659          return (pp_first);
2661 2660  }
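
A minimal user-level sketch of the two checks that qualify a candidate run in page_io_pool_alloc() above, assuming pfnalign and pfnseg are page-frame quantities as in the listing; P2ROUNDUP is reimplemented here and the sample frame numbers are arbitrary, so this is an illustration rather than kernel code.

    #include <stdint.h>
    #include <stdio.h>

    #define P2ROUNDUP(x, a) (((x) + (a) - 1) & ~((uint64_t)(a) - 1))

    static int
    run_acceptable(uint64_t mfn, uint64_t minctg, uint64_t pfnalign,
        uint64_t pfnseg)
    {
            uint64_t tmfn = mfn - (minctg - 1); /* lowest frame of the run */

            /* the run must start on the requested alignment */
            if (pfnalign != 0 && tmfn != P2ROUNDUP(tmfn, pfnalign))
                    return (0);
            /* the run must not cross a DMA segment boundary */
            if ((mfn & pfnseg) < (tmfn & pfnseg))
                    return (0);
            return (1);
    }

    int
    main(void)
    {
            /* 16-frame run, 16-frame alignment, 16-frame segment mask */
            printf("%d\n", run_acceptable(0x12347, 16, 16, 0xf)); /* misaligned */
            printf("%d\n", run_acceptable(0x1233f, 16, 16, 0xf)); /* acceptable */
            return (0);
    }
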
2662 2661  
2663 2662  page_t *
2664 2663  page_swap_with_hypervisor(struct vnode *vp, u_offset_t off, caddr_t vaddr,
2665 2664      ddi_dma_attr_t *mattr, uint_t flags, pgcnt_t minctg)
2666 2665  {
2667 2666          uint_t kflags;
2668 2667          int order, extra, extpages, i, contig, nbits, extents;
2669 2668          page_t *pp, *expp, *pp_first, **pplist = NULL;
2670 2669          mfn_t *mfnlist = NULL;
2671 2670  
2672 2671          contig = flags & PG_PHYSCONTIG;
2673 2672          if (minctg == 1)
2674 2673                  contig = 0;
2675 2674          flags &= ~PG_PHYSCONTIG;
2676 2675          kflags = flags & PG_WAIT ? KM_SLEEP : KM_NOSLEEP;
2677 2676          /*
2678 2677           * The hypervisor will allocate extents; if we want contig
2679 2678           * pages, the extent must be >= minctg
2680 2679           */
2681 2680          if (contig) {
2682 2681                  order = highbit(minctg) - 1;
2683 2682                  if (minctg & ((1 << order) - 1))
2684 2683                          order++;
2685 2684                  extpages = 1 << order;
2686 2685          } else {
2687 2686                  order = 0;
2688 2687                  extpages = minctg;
2689 2688          }
2690 2689          if (extpages > minctg) {
2691 2690                  extra = extpages - minctg;
2692 2691                  if (!page_resv(extra, kflags))
2693 2692                          return (NULL);
2694 2693          }
2695 2694          pp_first = NULL;
2696 2695          pplist = kmem_alloc(extpages * sizeof (page_t *), kflags);
2697 2696          if (pplist == NULL)
2698 2697                  goto balloon_fail;
2699 2698          mfnlist = kmem_alloc(extpages * sizeof (mfn_t), kflags);
2700 2699          if (mfnlist == NULL)
2701 2700                  goto balloon_fail;
2702 2701          pp = page_create_va(vp, off, minctg * PAGESIZE, flags, &kvseg, vaddr);
2703 2702          if (pp == NULL)
2704 2703                  goto balloon_fail;
2705 2704          pp_first = pp;
2706 2705          if (extpages > minctg) {
2707 2706                  /*
2708 2707                   * fill out the rest of extent pages to swap
2709 2708                   * with the hypervisor
2710 2709                   */
2711 2710                  for (i = 0; i < extra; i++) {
2712 2711                          expp = page_create_va(vp,
2713 2712                              (u_offset_t)(uintptr_t)io_pool_kva,
2714 2713                              PAGESIZE, flags, &kvseg, io_pool_kva);
2715 2714                          if (expp == NULL)
2716 2715                                  goto balloon_fail;
2717 2716                          (void) hat_pageunload(expp, HAT_FORCE_PGUNLOAD);
2718 2717                          page_io_unlock(expp);
2719 2718                          page_hashout(expp, NULL);
2720 2719                          page_io_lock(expp);
2721 2720                          /*
2722 2721                           * add page to end of list
2723 2722                           */
2724 2723                          expp->p_prev = pp_first->p_prev;
2725 2724                          expp->p_next = pp_first;
2726 2725                          expp->p_prev->p_next = expp;
2727 2726                          pp_first->p_prev = expp;
2728 2727                  }
2729 2728  
2730 2729          }
2731 2730          for (i = 0; i < extpages; i++) {
2732 2731                  pplist[i] = pp;
2733 2732                  pp = pp->p_next;
2734 2733          }
2735 2734          nbits = highbit(mattr->dma_attr_addr_hi);
2736 2735          extents = contig ? 1 : minctg;
2737 2736          if (balloon_replace_pages(extents, pplist, nbits, order,
2738 2737              mfnlist) != extents) {
2739 2738                  if (ioalloc_dbg)
2740 2739                          cmn_err(CE_NOTE, "request to hypervisor"
2741 2740                              " for %d pages, maxaddr %" PRIx64 " failed",
2742 2741                              extpages, mattr->dma_attr_addr_hi);
2743 2742                  goto balloon_fail;
2744 2743          }
2745 2744  
2746 2745          kmem_free(pplist, extpages * sizeof (page_t *));
2747 2746          kmem_free(mfnlist, extpages * sizeof (mfn_t));
2748 2747          /*
2749 2748           * Return any excess pages to free list
2750 2749           */
2751 2750          if (extpages > minctg) {
2752 2751                  for (i = 0; i < extra; i++) {
2753 2752                          pp = pp_first->p_prev;
2754 2753                          page_sub(&pp_first, pp);
2755 2754                          page_io_unlock(pp);
2756 2755                          page_unresv(1);
2757 2756                          page_free(pp, 1);
2758 2757                  }
2759 2758          }
2760 2759          return (pp_first);
2761 2760  balloon_fail:
2762 2761          /*
2763 2762           * Return pages to free list and return failure
2764 2763           */
2765 2764          while (pp_first != NULL) {
2766 2765                  pp = pp_first;
2767 2766                  page_sub(&pp_first, pp);
2768 2767                  page_io_unlock(pp);
2769 2768                  if (pp->p_vnode != NULL)
2770 2769                          page_hashout(pp, NULL);
2771 2770                  page_free(pp, 1);
2772 2771          }
2773 2772          if (pplist)
2774 2773                  kmem_free(pplist, extpages * sizeof (page_t *));
2775 2774          if (mfnlist)
2776 2775                  kmem_free(mfnlist, extpages * sizeof (mfn_t));
2777 2776          page_unresv(extpages - minctg);
2778 2777          return (NULL);
2779 2778  }
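
The extent sizing in page_swap_with_hypervisor() rounds a contiguous request up to a power of two, since the hypervisor hands out extents of 2^order pages; the surplus pages are reserved up front and returned to the free list afterwards. A standalone sketch of that arithmetic, with highbit() reimplemented here for illustration:

    #include <stdio.h>

    static int
    highbit(unsigned long v)    /* 1-based index of the highest set bit */
    {
            int h = 0;

            while (v != 0) {
                    h++;
                    v >>= 1;
            }
            return (h);
    }

    int
    main(void)
    {
            unsigned long minctg = 5;   /* want 5 contiguous pages */
            int order = highbit(minctg) - 1;

            if (minctg & ((1UL << order) - 1))
                    order++;            /* not already a power of two */
            printf("order %d, extpages %lu, extra %lu\n",
                order, 1UL << order, (1UL << order) - minctg);
            return (0);
    }
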
2780 2779  
2781 2780  static void
2782 2781  return_partial_alloc(page_t *plist)
2783 2782  {
2784 2783          page_t *pp;
2785 2784  
2786 2785          while (plist != NULL) {
2787 2786                  pp = plist;
2788 2787                  page_sub(&plist, pp);
2789 2788                  page_io_unlock(pp);
2790 2789                  page_destroy_io(pp);
2791 2790          }
2792 2791  }
2793 2792  
2794 2793  static page_t *
2795 2794  page_get_contigpages(
2796 2795          struct vnode    *vp,
2797 2796          u_offset_t      off,
2798 2797          int             *npagesp,
2799 2798          uint_t          flags,
2800 2799          caddr_t         vaddr,
2801 2800          ddi_dma_attr_t  *mattr)
2802 2801  {
2803 2802          mfn_t   max_mfn = HYPERVISOR_memory_op(XENMEM_maximum_ram_page, NULL);
2804 2803          page_t  *plist; /* list to return */
2805 2804          page_t  *pp, *mcpl;
2806 2805          int     contig, anyaddr, npages, getone = 0;
2807 2806          mfn_t   lo_mfn;
2808 2807          mfn_t   hi_mfn;
2809 2808          pgcnt_t pfnalign = 0;
2810 2809          int     align, sgllen;
2811 2810          uint64_t pfnseg;
2812 2811          pgcnt_t minctg;
2813 2812  
2814 2813          npages = *npagesp;
2815 2814          ASSERT(mattr != NULL);
2816 2815          lo_mfn = mmu_btop(mattr->dma_attr_addr_lo);
2817 2816          hi_mfn = mmu_btop(mattr->dma_attr_addr_hi);
2818 2817          sgllen = mattr->dma_attr_sgllen;
2819 2818          pfnseg = mmu_btop(mattr->dma_attr_seg);
2820 2819          align = maxbit(mattr->dma_attr_align, mattr->dma_attr_minxfer);
2821 2820          if (align > MMU_PAGESIZE)
2822 2821                  pfnalign = mmu_btop(align);
2823 2822  
2824 2823          contig = flags & PG_PHYSCONTIG;
2825 2824          if (npages == -1) {
2826 2825                  npages = 1;
2827 2826                  pfnalign = 0;
2828 2827          }
2829 2828          /*
2830 2829           * Clear the contig flag if only one page is needed.
2831 2830           */
2832 2831          if (npages == 1) {
2833 2832                  getone = 1;
2834 2833                  contig = 0;
2835 2834          }
2836 2835  
2837 2836          /*
2838 2837           * Check if any page in the system is fine.
2839 2838           */
2840 2839          anyaddr = lo_mfn == 0 && hi_mfn >= max_mfn;
2841 2840          if (!contig && anyaddr && !pfnalign) {
2842 2841                  flags &= ~PG_PHYSCONTIG;
2843 2842                  plist = page_create_va(vp, off, npages * MMU_PAGESIZE,
2844 2843                      flags, &kvseg, vaddr);
2845 2844                  if (plist != NULL) {
2846 2845                          *npagesp = 0;
2847 2846                          return (plist);
2848 2847                  }
2849 2848          }
2850 2849          plist = NULL;
2851 2850          minctg = howmany(npages, sgllen);
2852 2851          while (npages > sgllen || getone) {
2853 2852                  if (minctg > npages)
2854 2853                          minctg = npages;
2855 2854                  mcpl = NULL;
2856 2855                  /*
2857 2856                   * We could want contig pages with no address range limits.
2858 2857                   */
2859 2858                  if (anyaddr && contig) {
2860 2859                          /*
2861 2860                           * Look for free contig pages to satisfy the request.
2862 2861                           */
2863 2862                          mcpl = find_contig_free(minctg, flags, pfnseg,
2864 2863                              pfnalign);
2865 2864                  }
2866 2865                  /*
2867 2866                   * Try the reserved io pools next
2868 2867                   */
2869 2868                  if (mcpl == NULL)
2870 2869                          mcpl = page_io_pool_alloc(mattr, contig, minctg);
2871 2870                  if (mcpl != NULL) {
2872 2871                          pp = mcpl;
2873 2872                          do {
2874 2873                                  if (!page_hashin(pp, vp, off, NULL)) {
2875 2874                                          panic("page_get_contigpages:"
2876 2875                                              " hashin failed"
2877 2876                                              " pp %p, vp %p, off %llx",
2878 2877                                              (void *)pp, (void *)vp, off);
2879 2878                                  }
2880 2879                                  off += MMU_PAGESIZE;
2881 2880                                  PP_CLRFREE(pp);
2882 2881                                  PP_CLRAGED(pp);
2883 2882                                  page_set_props(pp, P_REF);
2884 2883                                  page_io_lock(pp);
2885 2884                                  pp = pp->p_next;
2886 2885                          } while (pp != mcpl);
2887 2886                  } else {
2888 2887                          /*
2889 2888                           * Hypervisor exchange doesn't handle segment or
2890 2889                           * alignment constraints
2891 2890                           */
2892 2891                          if (mattr->dma_attr_seg < mattr->dma_attr_addr_hi ||
2893 2892                              pfnalign)
2894 2893                                  goto fail;
2895 2894                          /*
2896 2895                           * Try exchanging pages with the hypervisor
2897 2896                           */
2898 2897                          mcpl = page_swap_with_hypervisor(vp, off, vaddr, mattr,
2899 2898                              flags, minctg);
2900 2899                          if (mcpl == NULL)
2901 2900                                  goto fail;
2902 2901                          off += minctg * MMU_PAGESIZE;
2903 2902                  }
2904 2903                  check_dma(mattr, mcpl, minctg);
2905 2904                  /*
2906 2905                   * Here with a minctg run of contiguous pages, add them to the
2907 2906                   * list we will return for this request.
2908 2907                   */
2909 2908                  page_list_concat(&plist, &mcpl);
2910 2909                  npages -= minctg;
2911 2910                  *npagesp = npages;
2912 2911                  sgllen--;
2913 2912                  if (getone)
2914 2913                          break;
2915 2914          }
2916 2915          return (plist);
2917 2916  fail:
2918 2917          return_partial_alloc(plist);
2919 2918          return (NULL);
2920 2919  }
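
page_get_contigpages() splits an npages request into at most sgllen contiguous runs of minctg = howmany(npages, sgllen) pages, leaving any small remainder for the caller's single-page loop. A standalone sketch of that chunking, ignoring the getone single-page case; the request sizes below are arbitrary:

    #include <stdio.h>

    #define howmany(x, y)   (((x) + ((y) - 1)) / (y))

    int
    main(void)
    {
            long npages = 13, sgllen = 4;
            long minctg = howmany(npages, sgllen);

            while (npages > sgllen) {
                    if (minctg > npages)
                            minctg = npages;
                    printf("contig run of %ld pages\n", minctg);
                    npages -= minctg;
                    sgllen--;
            }
            printf("%ld page(s) left for the caller's single-page loop\n",
                npages);
            return (0);
    }
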
2921 2920  
2922 2921  /*
2923 2922   * Allocator for domain 0 I/O pages. We match the required
2924 2923   * DMA attributes and contiguity constraints.
2925 2924   */
2926 2925  /*ARGSUSED*/
2927 2926  page_t *
2928 2927  page_create_io(
2929 2928          struct vnode    *vp,
2930 2929          u_offset_t      off,
2931 2930          uint_t          bytes,
2932 2931          uint_t          flags,
2933 2932          struct as       *as,
2934 2933          caddr_t         vaddr,
2935 2934          ddi_dma_attr_t  *mattr)
2936 2935  {
2937 2936          page_t  *plist = NULL, *pp;
2938 2937          int     npages = 0, contig, anyaddr, pages_req;
2939 2938          mfn_t   lo_mfn;
2940 2939          mfn_t   hi_mfn;
2941 2940          pgcnt_t pfnalign = 0;
2942 2941          int     align;
2943 2942          int     is_domu = 0;
2944 2943          int     dummy, bytes_got;
2945 2944          mfn_t   max_mfn = HYPERVISOR_memory_op(XENMEM_maximum_ram_page, NULL);
2946 2945  
2947 2946          ASSERT(mattr != NULL);
2948 2947          lo_mfn = mmu_btop(mattr->dma_attr_addr_lo);
2949 2948          hi_mfn = mmu_btop(mattr->dma_attr_addr_hi);
2950 2949          align = maxbit(mattr->dma_attr_align, mattr->dma_attr_minxfer);
2951 2950          if (align > MMU_PAGESIZE)
2952 2951                  pfnalign = mmu_btop(align);
2953 2952  
2954 2953          /*
2955 2954           * Clear the contig flag if only one page is needed or the scatter
2956 2955           * gather list length is >= npages.
2957 2956           */
2958 2957          pages_req = npages = mmu_btopr(bytes);
2959 2958          contig = (flags & PG_PHYSCONTIG);
2960 2959          bytes = P2ROUNDUP(bytes, MMU_PAGESIZE);
2961 2960          if (bytes == MMU_PAGESIZE || mattr->dma_attr_sgllen >= npages)
2962 2961                  contig = 0;
2963 2962  
2964 2963          /*
2965 2964           * Check if any old page in the system is fine.
2966 2965           * DomU should always go down this path.
2967 2966           */
2968 2967          is_domu = !DOMAIN_IS_INITDOMAIN(xen_info);
2969 2968          anyaddr = lo_mfn == 0 && hi_mfn >= max_mfn && !pfnalign;
2970 2969          if ((!contig && anyaddr) || is_domu) {
2971 2970                  flags &= ~PG_PHYSCONTIG;
2972 2971                  plist = page_create_va(vp, off, bytes, flags, &kvseg, vaddr);
2973 2972                  if (plist != NULL)
2974 2973                          return (plist);
2975 2974                  else if (is_domu)
2976 2975                          return (NULL); /* no memory available */
2977 2976          }
2978 2977          /*
2979 2978           * DomU should never reach here
2980 2979           */
2981 2980          if (contig) {
2982 2981                  plist = page_get_contigpages(vp, off, &npages, flags, vaddr,
2983 2982                      mattr);
2984 2983                  if (plist == NULL)
2985 2984                          goto fail;
2986 2985                  bytes_got = (pages_req - npages) << MMU_PAGESHIFT;
2987 2986                  vaddr += bytes_got;
2988 2987                  off += bytes_got;
2989 2988                  /*
2990 2989                   * We now have all the contiguous pages we need, but
2991 2990                   * we may still need additional non-contiguous pages.
2992 2991                   */
2993 2992          }
2994 2993          /*
2995 2994           * Now loop collecting the requested number of pages. These do
2996 2995           * not have to be contiguous pages but we will use the contig
2997 2996           * page alloc code to get the pages since it will honor any
2998 2997           * other constraints the pages may have.
2999 2998           */
3000 2999          while (npages--) {
3001 3000                  dummy = -1;
3002 3001                  pp = page_get_contigpages(vp, off, &dummy, flags, vaddr, mattr);
3003 3002                  if (pp == NULL)
3004 3003                          goto fail;
3005 3004                  page_add(&plist, pp);
3006 3005                  vaddr += MMU_PAGESIZE;
3007 3006                  off += MMU_PAGESIZE;
3008 3007          }
3009 3008          return (plist);
3010 3009  fail:
3011 3010          /*
3012 3011           * Failed to get enough pages; return the ones we did get
3013 3012           */
3014 3013          return_partial_alloc(plist);
3015 3014          return (NULL);
3016 3015  }
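
After the contiguous pass in the dom0 page_create_io() above, (pages_req - npages) pages have been placed and vaddr/off are advanced past that portion before the remaining pages are collected one at a time. A small standalone sketch of that bookkeeping, with illustrative values:

    #include <stdio.h>

    #define MMU_PAGESHIFT   12

    int
    main(void)
    {
            int pages_req = 8;          /* total pages asked for */
            int npages = 3;             /* still unsatisfied after contig pass */
            unsigned long vaddr = 0x100000UL, off = 0;
            unsigned long bytes_got =
                (unsigned long)(pages_req - npages) << MMU_PAGESHIFT;

            vaddr += bytes_got;
            off += bytes_got;
            printf("contig part: %lu bytes; next vaddr 0x%lx, off 0x%lx, "
                "%d single pages to go\n", bytes_got, vaddr, off, npages);
            return (0);
    }
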
3017 3016  
3018 3017  /*
3019 3018   * Lock and return the page with the highest mfn that we can find.  last_mfn
3020 3019   * holds the last one found, so the next search can start from there.  We
3021 3020   * also keep a counter so that we don't loop forever if the machine has no
3022 3021   * free pages.
3023 3022   *
3024 3023   * This is called from the balloon thread to find pages to give away.  new_high
3025 3024   * is used when new mfn's have been added to the system - we will reset our
3026 3025   * search if the new mfn's are higher than our current search position.
3027 3026   */
3028 3027  page_t *
3029 3028  page_get_high_mfn(mfn_t new_high)
3030 3029  {
3031 3030          static mfn_t last_mfn = 0;
3032 3031          pfn_t pfn;
3033 3032          page_t *pp;
3034 3033          ulong_t loop_count = 0;
3035 3034  
3036 3035          if (new_high > last_mfn)
3037 3036                  last_mfn = new_high;
3038 3037  
3039 3038          for (; loop_count < mfn_count; loop_count++, last_mfn--) {
3040 3039                  if (last_mfn == 0) {
3041 3040                          last_mfn = cached_max_mfn;
3042 3041                  }
3043 3042  
3044 3043                  pfn = mfn_to_pfn(last_mfn);
3045 3044                  if (pfn & PFN_IS_FOREIGN_MFN)
3046 3045                          continue;
3047 3046  
3048 3047                  /* See if the page is free.  If so, lock it. */
3049 3048                  pp = page_numtopp_alloc(pfn);
3050 3049                  if (pp == NULL)
3051 3050                          continue;
3052 3051                  PP_CLRFREE(pp);
3053 3052  
3054 3053                  ASSERT(PAGE_EXCL(pp));
3055 3054                  ASSERT(pp->p_vnode == NULL);
3056 3055                  ASSERT(!hat_page_is_mapped(pp));
3057 3056                  last_mfn--;
3058 3057                  return (pp);
3059 3058          }
3060 3059          return (NULL);
3061 3060  }
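
A standalone sketch of the descending, wrapping walk in page_get_high_mfn(): the search resumes below the last hit, wraps from frame 0 back to the top, and gives up after one full pass. The is_free_mfn() predicate and the MFN_COUNT/MAX_MFN values are stand-ins for kernel state; only the walk order and the loop bound are illustrated.

    #include <stdio.h>

    #define MFN_COUNT   16              /* stand-in for mfn_count */
    #define MAX_MFN     (MFN_COUNT - 1) /* stand-in for cached_max_mfn */

    static int
    is_free_mfn(unsigned long mfn)
    {
            return (mfn == 3);          /* pretend only frame 3 is free */
    }

    int
    main(void)
    {
            /* static, mirroring the kernel's resume-from-last behavior */
            static unsigned long last_mfn = 5;
            unsigned long loops;

            for (loops = 0; loops < MFN_COUNT; loops++, last_mfn--) {
                    if (last_mfn == 0)
                            last_mfn = MAX_MFN;     /* wrap and keep going */
                    if (is_free_mfn(last_mfn)) {
                            printf("found free mfn %lu\n", last_mfn);
                            last_mfn--;     /* a later call resumes below */
                            return (0);
                    }
            }
            printf("no free page found\n");
            return (0);
    }
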
3062 3061  
3063 3062  #else /* !__xpv */
3064 3063  
3065 3064  /*
3066 3065   * get a page from any list with the given mnode
3067 3066   */
3068 3067  static page_t *
3069 3068  page_get_mnode_anylist(ulong_t origbin, uchar_t szc, uint_t flags,
3070 3069      int mnode, int mtype, ddi_dma_attr_t *dma_attr)
3071 3070  {
3072 3071          kmutex_t                *pcm;
3073 3072          int                     i;
3074 3073          page_t                  *pp;
3075 3074          page_t                  *first_pp;
3076 3075          uint64_t                pgaddr;
3077 3076          ulong_t                 bin;
3078 3077          int                     mtypestart;
3079 3078          int                     plw_initialized;
3080 3079          page_list_walker_t      plw;
3081 3080  
3082 3081          VM_STAT_ADD(pga_vmstats.pgma_alloc);
3083 3082  
3084 3083          ASSERT((flags & PG_MATCH_COLOR) == 0);
3085 3084          ASSERT(szc == 0);
3086 3085          ASSERT(dma_attr != NULL);
3087 3086  
3088 3087          MTYPE_START(mnode, mtype, flags);
3089 3088          if (mtype < 0) {
3090 3089                  VM_STAT_ADD(pga_vmstats.pgma_allocempty);
3091 3090                  return (NULL);
3092 3091          }
3093 3092  
3094 3093          mtypestart = mtype;
3095 3094  
3096 3095          bin = origbin;
3097 3096  
3098 3097          /*
3099 3098           * check up to page_colors + 1 bins - origbin may be checked twice
3100 3099           * because of BIN_STEP skip
3101 3100           */
3102 3101          do {
3103 3102                  plw_initialized = 0;
3104 3103  
3105 3104                  for (plw.plw_count = 0;
3106 3105                      plw.plw_count < page_colors; plw.plw_count++) {
3107 3106  
3108 3107                          if (PAGE_FREELISTS(mnode, szc, bin, mtype) == NULL)
3109 3108                                  goto nextfreebin;
3110 3109  
3111 3110                          pcm = PC_BIN_MUTEX(mnode, bin, PG_FREE_LIST);
3112 3111                          mutex_enter(pcm);
3113 3112                          pp = PAGE_FREELISTS(mnode, szc, bin, mtype);
3114 3113                          first_pp = pp;
3115 3114                          while (pp != NULL) {
3116 3115                                  if (IS_DUMP_PAGE(pp) || page_trylock(pp,
3117 3116                                      SE_EXCL) == 0) {
3118 3117                                          pp = pp->p_next;
3119 3118                                          if (pp == first_pp) {
3120 3119                                                  pp = NULL;
3121 3120                                          }
3122 3121                                          continue;
3123 3122                                  }
3124 3123  
3125 3124                                  ASSERT(PP_ISFREE(pp));
3126 3125                                  ASSERT(PP_ISAGED(pp));
3127 3126                                  ASSERT(pp->p_vnode == NULL);
3128 3127                                  ASSERT(pp->p_hash == NULL);
3129 3128                                  ASSERT(pp->p_offset == (u_offset_t)-1);
3130 3129                                  ASSERT(pp->p_szc == szc);
3131 3130                                  ASSERT(PFN_2_MEM_NODE(pp->p_pagenum) == mnode);
3132 3131                                  /* check if page within DMA attributes */
3133 3132                                  pgaddr = pa_to_ma(pfn_to_pa(pp->p_pagenum));
3134 3133                                  if ((pgaddr >= dma_attr->dma_attr_addr_lo) &&
3135 3134                                      (pgaddr + MMU_PAGESIZE - 1 <=
3136 3135                                      dma_attr->dma_attr_addr_hi)) {
3137 3136                                          break;
3138 3137                                  }
3139 3138  
3140 3139                                  /* continue looking */
3141 3140                                  page_unlock(pp);
3142 3141                                  pp = pp->p_next;
3143 3142                                  if (pp == first_pp)
3144 3143                                          pp = NULL;
3145 3144  
3146 3145                          }
3147 3146                          if (pp != NULL) {
3148 3147                                  ASSERT(mtype == PP_2_MTYPE(pp));
3149 3148                                  ASSERT(pp->p_szc == 0);
3150 3149  
3151 3150                                  /* found a page with specified DMA attributes */
3152 3151                                  page_sub(&PAGE_FREELISTS(mnode, szc, bin,
3153 3152                                      mtype), pp);
3154 3153                                  page_ctr_sub(mnode, mtype, pp, PG_FREE_LIST);
3155 3154  
3156 3155                                  if ((PP_ISFREE(pp) == 0) ||
3157 3156                                      (PP_ISAGED(pp) == 0)) {
3158 3157                                          cmn_err(CE_PANIC, "page %p is not free",
3159 3158                                              (void *)pp);
3160 3159                                  }
3161 3160  
3162 3161                                  mutex_exit(pcm);
3163 3162                                  check_dma(dma_attr, pp, 1);
3164 3163                                  VM_STAT_ADD(pga_vmstats.pgma_allocok);
3165 3164                                  return (pp);
3166 3165                          }
3167 3166                          mutex_exit(pcm);
3168 3167  nextfreebin:
3169 3168                          if (plw_initialized == 0) {
3170 3169                                  page_list_walk_init(szc, 0, bin, 1, 0, &plw);
3171 3170                                  ASSERT(plw.plw_ceq_dif == page_colors);
3172 3171                                  plw_initialized = 1;
3173 3172                          }
3174 3173  
3175 3174                          if (plw.plw_do_split) {
3176 3175                                  pp = page_freelist_split(szc, bin, mnode,
3177 3176                                      mtype,
3178 3177                                      mmu_btop(dma_attr->dma_attr_addr_lo),
3179 3178                                      mmu_btop(dma_attr->dma_attr_addr_hi + 1),
3180 3179                                      &plw);
3181 3180                                  if (pp != NULL) {
3182 3181                                          check_dma(dma_attr, pp, 1);
3183 3182                                          return (pp);
3184 3183                                  }
3185 3184                          }
3186 3185  
3187 3186                          bin = page_list_walk_next_bin(szc, bin, &plw);
3188 3187                  }
3189 3188  
3190 3189                  MTYPE_NEXT(mnode, mtype, flags);
3191 3190          } while (mtype >= 0);
3192 3191  
3193 3192          /* failed to find a page in the freelist; try it in the cachelist */
3194 3193  
3195 3194          /* reset mtype start for cachelist search */
3196 3195          mtype = mtypestart;
3197 3196          ASSERT(mtype >= 0);
3198 3197  
3199 3198          /* start with the bin of matching color */
3200 3199          bin = origbin;
3201 3200  
3202 3201          do {
3203 3202                  for (i = 0; i <= page_colors; i++) {
3204 3203                          if (PAGE_CACHELISTS(mnode, bin, mtype) == NULL)
3205 3204                                  goto nextcachebin;
3206 3205                          pcm = PC_BIN_MUTEX(mnode, bin, PG_CACHE_LIST);
3207 3206                          mutex_enter(pcm);
3208 3207                          pp = PAGE_CACHELISTS(mnode, bin, mtype);
3209 3208                          first_pp = pp;
3210 3209                          while (pp != NULL) {
3211 3210                                  if (IS_DUMP_PAGE(pp) || page_trylock(pp,
3212 3211                                      SE_EXCL) == 0) {
3213 3212                                          pp = pp->p_next;
3214 3213                                          if (pp == first_pp)
3215 3214                                                  pp = NULL;
3216 3215                                          continue;
3217 3216                                  }
3218 3217                                  ASSERT(pp->p_vnode);
3219 3218                                  ASSERT(PP_ISAGED(pp) == 0);
3220 3219                                  ASSERT(pp->p_szc == 0);
3221 3220                                  ASSERT(PFN_2_MEM_NODE(pp->p_pagenum) == mnode);
3222 3221  
3223 3222                                  /* check if page within DMA attributes */
3224 3223  
3225 3224                                  pgaddr = pa_to_ma(pfn_to_pa(pp->p_pagenum));
3226 3225                                  if ((pgaddr >= dma_attr->dma_attr_addr_lo) &&
3227 3226                                      (pgaddr + MMU_PAGESIZE - 1 <=
3228 3227                                      dma_attr->dma_attr_addr_hi)) {
3229 3228                                          break;
3230 3229                                  }
3231 3230  
3232 3231                                  /* continue looking */
3233 3232                                  page_unlock(pp);
3234 3233                                  pp = pp->p_next;
3235 3234                                  if (pp == first_pp)
3236 3235                                          pp = NULL;
3237 3236                          }
3238 3237  
3239 3238                          if (pp != NULL) {
3240 3239                                  ASSERT(mtype == PP_2_MTYPE(pp));
3241 3240                                  ASSERT(pp->p_szc == 0);
3242 3241  
3243 3242                                  /* found a page with specified DMA attributes */
3244 3243                                  page_sub(&PAGE_CACHELISTS(mnode, bin,
3245 3244                                      mtype), pp);
3246 3245                                  page_ctr_sub(mnode, mtype, pp, PG_CACHE_LIST);
3247 3246  
3248 3247                                  mutex_exit(pcm);
3249 3248                                  ASSERT(pp->p_vnode);
3250 3249                                  ASSERT(PP_ISAGED(pp) == 0);
3251 3250                                  check_dma(dma_attr, pp, 1);
3252 3251                                  VM_STAT_ADD(pga_vmstats.pgma_allocok);
3253 3252                                  return (pp);
3254 3253                          }
3255 3254                          mutex_exit(pcm);
3256 3255  nextcachebin:
3257 3256                          bin += (i == 0) ? BIN_STEP : 1;
3258 3257                          bin &= page_colors_mask;
3259 3258                  }
3260 3259                  MTYPE_NEXT(mnode, mtype, flags);
3261 3260          } while (mtype >= 0);
3262 3261  
3263 3262          VM_STAT_ADD(pga_vmstats.pgma_allocfailed);
3264 3263          return (NULL);
3265 3264  }
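
The per-page DMA test in page_get_mnode_anylist() requires the whole page, not just its first byte, to fall inside [dma_attr_addr_lo, dma_attr_addr_hi]. A standalone sketch of that window check, assuming a 4K page and an illustrative 16 MB limit:

    #include <stdint.h>
    #include <stdio.h>

    #define MMU_PAGESIZE    4096ULL

    static int
    page_in_dma_window(uint64_t pgaddr, uint64_t addr_lo, uint64_t addr_hi)
    {
            return (pgaddr >= addr_lo &&
                pgaddr + MMU_PAGESIZE - 1 <= addr_hi);
    }

    int
    main(void)
    {
            uint64_t lo = 0, hi = 0xffffffULL;  /* 16 MB ISA-style limit */

            /* the last page fully below 16 MB passes ... */
            printf("%d\n", page_in_dma_window(0xfff000ULL, lo, hi));
            /* ... the first page at or above the limit does not */
            printf("%d\n", page_in_dma_window(0x1000000ULL, lo, hi));
            return (0);
    }
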
3266 3265  
3267 3266  /*
3268 3267   * This function is similar to page_get_freelist()/page_get_cachelist()
3269 3268   * but it searches both the lists to find a page with the specified
3270 3269   * color (or no color) and DMA attributes. The search is done in the
3271 3270   * freelist first and then in the cache list within the highest memory
3272 3271   * range (based on DMA attributes) before searching in the lower
3273 3272   * memory ranges.
3274 3273   *
3275 3274   * Note: This function is called only by page_create_io().
3276 3275   */
3277 3276  /*ARGSUSED*/
3278 3277  static page_t *
3279 3278  page_get_anylist(struct vnode *vp, u_offset_t off, struct as *as, caddr_t vaddr,
3280 3279      size_t size, uint_t flags, ddi_dma_attr_t *dma_attr, lgrp_t *lgrp)
3281 3280  {
3282 3281          uint_t          bin;
3283 3282          int             mtype;
3284 3283          page_t          *pp;
3285 3284          int             n;
3286 3285          int             m;
3287 3286          int             szc;
3288 3287          int             fullrange;
3289 3288          int             mnode;
3290 3289          int             local_failed_stat = 0;
3291 3290          lgrp_mnode_cookie_t     lgrp_cookie;
3292 3291  
3293 3292          VM_STAT_ADD(pga_vmstats.pga_alloc);
3294 3293  
3295 3294          /* only base pagesize currently supported */
3296 3295          if (size != MMU_PAGESIZE)
3297 3296                  return (NULL);
3298 3297  
3299 3298          /*
3300 3299           * If we're passed a specific lgroup, we use it.  Otherwise,
3301 3300           * assume first-touch placement is desired.
3302 3301           */
3303 3302          if (!LGRP_EXISTS(lgrp))
3304 3303                  lgrp = lgrp_home_lgrp();
3305 3304  
3306 3305          /* LINTED */
3307 3306          AS_2_BIN(as, seg, vp, vaddr, bin, 0);
3308 3307  
3309 3308          /*
3310 3309           * Only hold one freelist or cachelist lock at a time; that way we
3311 3310           * can start anywhere and not have to worry about lock
3312 3311           * ordering.
3313 3312           */
3314 3313          if (dma_attr == NULL) {
3315 3314                  n = mtype16m;
3316 3315                  m = mtypetop;
3317 3316                  fullrange = 1;
3318 3317                  VM_STAT_ADD(pga_vmstats.pga_nulldmaattr);
3319 3318          } else {
3320 3319                  pfn_t pfnlo = mmu_btop(dma_attr->dma_attr_addr_lo);
3321 3320                  pfn_t pfnhi = mmu_btop(dma_attr->dma_attr_addr_hi);
3322 3321  
3323 3322                  /*
3324 3323                   * We can only guarantee alignment to a page boundary.
3325 3324                   */
3326 3325                  if (dma_attr->dma_attr_align > MMU_PAGESIZE)
3327 3326                          return (NULL);
3328 3327  
3329 3328                  /* Sanity check the dma_attr */
3330 3329                  if (pfnlo > pfnhi)
3331 3330                          return (NULL);
3332 3331  
3333 3332                  n = pfn_2_mtype(pfnlo);
3334 3333                  m = pfn_2_mtype(pfnhi);
3335 3334  
3336 3335                  fullrange = ((pfnlo == mnoderanges[n].mnr_pfnlo) &&
3337 3336                      (pfnhi >= mnoderanges[m].mnr_pfnhi));
3338 3337          }
3339 3338          VM_STAT_COND_ADD(fullrange == 0, pga_vmstats.pga_notfullrange);
3340 3339  
3341 3340          szc = 0;
3342 3341  
3343 3342          /* cycling through mtype is handled by RANGE0 if n == mtype16m */
3344 3343          if (n == mtype16m) {
3345 3344                  flags |= PGI_MT_RANGE0;
3346 3345                  n = m;
3347 3346          }
3348 3347  
3349 3348          /*
3350 3349           * Try local memory node first, but try remote if we can't
3351 3350           * get a page of the right color.
3352 3351           */
3353 3352          LGRP_MNODE_COOKIE_INIT(lgrp_cookie, lgrp, LGRP_SRCH_HIER);
3354 3353          while ((mnode = lgrp_memnode_choose(&lgrp_cookie)) >= 0) {
3355 3354                  /*
3356 3355                   * allocate pages from high pfn to low.
3357 3356                   */
3358 3357                  mtype = m;
3359 3358                  do {
3360 3359                          if (fullrange != 0) {
3361 3360                                  pp = page_get_mnode_freelist(mnode,
3362 3361                                      bin, mtype, szc, flags);
3363 3362                                  if (pp == NULL) {
3364 3363                                          pp = page_get_mnode_cachelist(
3365 3364                                              bin, flags, mnode, mtype);
3366 3365                                  }
3367 3366                          } else {
3368 3367                                  pp = page_get_mnode_anylist(bin, szc,
3369 3368                                      flags, mnode, mtype, dma_attr);
3370 3369                          }
3371 3370                          if (pp != NULL) {
3372 3371                                  VM_STAT_ADD(pga_vmstats.pga_allocok);
3373 3372                                  check_dma(dma_attr, pp, 1);
3374 3373                                  return (pp);
3375 3374                          }
3376 3375                  } while (mtype != n &&
3377 3376                      (mtype = mnoderanges[mtype].mnr_next) != -1);
3378 3377                  if (!local_failed_stat) {
3379 3378                          lgrp_stat_add(lgrp->lgrp_id, LGRP_NUM_ALLOC_FAIL, 1);
3380 3379                          local_failed_stat = 1;
3381 3380                  }
3382 3381          }
3383 3382          VM_STAT_ADD(pga_vmstats.pga_allocfailed);
3384 3383  
3385 3384          return (NULL);
3386 3385  }
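
Before searching, page_get_anylist() rejects anything other than a base-pagesize, at-most-page-aligned request with a sane address window. A standalone sketch of those up-front checks, with ddi_dma_attr_t reduced to the three fields they actually use and illustrative values:

    #include <stdint.h>
    #include <stdio.h>

    #define MMU_PAGESIZE    4096ULL

    typedef struct {
            uint64_t dma_attr_addr_lo;
            uint64_t dma_attr_addr_hi;
            uint64_t dma_attr_align;
    } mini_dma_attr_t;

    static int
    anylist_args_ok(size_t size, const mini_dma_attr_t *dma_attr)
    {
            if (size != MMU_PAGESIZE)
                    return (0);     /* only base pagesize supported */
            if (dma_attr == NULL)
                    return (1);     /* no constraints: full range */
            if (dma_attr->dma_attr_align > MMU_PAGESIZE)
                    return (0);     /* can't do better than page alignment */
            if (dma_attr->dma_attr_addr_lo > dma_attr->dma_attr_addr_hi)
                    return (0);     /* inverted window */
            return (1);
    }

    int
    main(void)
    {
            mini_dma_attr_t attr = { 0, 0xffffffffULL, MMU_PAGESIZE };

            printf("%d\n", anylist_args_ok(MMU_PAGESIZE, &attr));
            attr.dma_attr_align = 2 * MMU_PAGESIZE;
            printf("%d\n", anylist_args_ok(MMU_PAGESIZE, &attr));
            return (0);
    }
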
3387 3386  
3388 3387  /*
3389 3388   * page_create_io()
3390 3389   *
3391 3390   * This function is a copy of page_create_va() with an additional
3392 3391   * argument 'mattr' that specifies DMA memory requirements to
3393 3392   * the page list functions. This function is used by the segkmem
3394 3393   * allocator, so it is only used to create new pages (i.e., PG_EXCL is
3395 3394   * set).
3396 3395   *
3397 3396   * Note: This interface is currently used by x86 PSM only and is
3398 3397   *       not fully specified, so the commitment level is only that of a
3399 3398   *       private interface specific to x86. This interface uses the
3400 3399   *       PSM-specific page_get_anylist() interface.
3401 3400   */
3402 3401  
3403 3402  #define PAGE_HASH_SEARCH(index, pp, vp, off) { \
3404 3403          for ((pp) = page_hash[(index)]; (pp); (pp) = (pp)->p_hash) { \
3405 3404                  if ((pp)->p_vnode == (vp) && (pp)->p_offset == (off)) \
3406 3405                          break; \
3407 3406          } \
3408 3407  }
3409 3408  
3410 3409  
3411 3410  page_t *
3412 3411  page_create_io(
3413 3412          struct vnode    *vp,
3414 3413          u_offset_t      off,
3415 3414          uint_t          bytes,
3416 3415          uint_t          flags,
3417 3416          struct as       *as,
3418 3417          caddr_t         vaddr,
3419 3418          ddi_dma_attr_t  *mattr) /* DMA memory attributes if any */
3420 3419  {
3421 3420          page_t          *plist = NULL;
3422 3421          uint_t          plist_len = 0;
3423 3422          pgcnt_t         npages;
3424 3423          page_t          *npp = NULL;
3425 3424          uint_t          pages_req;
3426 3425          page_t          *pp;
3427 3426          kmutex_t        *phm = NULL;
3428 3427          uint_t          index;
3429 3428  
3430 3429          TRACE_4(TR_FAC_VM, TR_PAGE_CREATE_START,
3431 3430              "page_create_start:vp %p off %llx bytes %u flags %x",
3432 3431              vp, off, bytes, flags);
3433 3432  
3434 3433          ASSERT((flags & ~(PG_EXCL | PG_WAIT | PG_PHYSCONTIG)) == 0);
3435 3434  
3436 3435          pages_req = npages = mmu_btopr(bytes);
3437 3436  
3438 3437          /*
3439 3438           * Do the freemem and pcf accounting.
3440 3439           */
3441 3440          if (!page_create_wait(npages, flags)) {
3442 3441                  return (NULL);
3443 3442          }
3444 3443  
3445 3444          TRACE_2(TR_FAC_VM, TR_PAGE_CREATE_SUCCESS,
3446 3445              "page_create_success:vp %p off %llx", vp, off);
3447 3446  
3448 3447          /*
3449 3448           * If satisfying this request has left us with too little
3450 3449           * memory, start the wheels turning to get some back.  The
3451 3450           * first clause of the test prevents waking up the pageout
3452 3451           * daemon in situations where it would decide that there's
3453 3452           * nothing to do.
3454 3453           */
3455 3454          if (nscan < desscan && freemem < minfree) {
3456 3455                  TRACE_1(TR_FAC_VM, TR_PAGEOUT_CV_SIGNAL,
3457 3456                      "pageout_cv_signal:freemem %ld", freemem);
3458 3457                  cv_signal(&proc_pageout->p_cv);
3459 3458          }
3460 3459  
3461 3460          if (flags & PG_PHYSCONTIG) {
3462 3461  
3463 3462                  plist = page_get_contigpage(&npages, mattr, 1);
3464 3463                  if (plist == NULL) {
3465 3464                          page_create_putback(npages);
3466 3465                          return (NULL);
3467 3466                  }
3468 3467  
3469 3468                  pp = plist;
3470 3469  
3471 3470                  do {
3472 3471                          if (!page_hashin(pp, vp, off, NULL)) {
3473 3472                                  panic("pg_creat_io: hashin failed %p %p %llx",
3474 3473                                      (void *)pp, (void *)vp, off);
3475 3474                          }
3476 3475                          VM_STAT_ADD(page_create_new);
3477 3476                          off += MMU_PAGESIZE;
3478 3477                          PP_CLRFREE(pp);
3479 3478                          PP_CLRAGED(pp);
3480 3479                          page_set_props(pp, P_REF);
3481 3480                          pp = pp->p_next;
3482 3481                  } while (pp != plist);
3483 3482  
3484 3483                  if (!npages) {
3485 3484                          check_dma(mattr, plist, pages_req);
3486 3485                          return (plist);
3487 3486                  } else {
3488 3487                          vaddr += (pages_req - npages) << MMU_PAGESHIFT;
3489 3488                  }
3490 3489  
3491 3490                  /*
3492 3491                   * fall-thru:
3493 3492                   *
3494 3493                   * page_get_contigpage returns when npages <= sgllen.
3495 3494                   * Grab the rest of the non-contig pages below from anylist.
3496 3495                   */
3497 3496          }
3498 3497  
3499 3498          /*
3500 3499           * Loop around collecting the requested number of pages.
3501 3500           * Most of the time, we have to `create' a new page. With
3502 3501           * this in mind, pull the page off the free list before
3503 3502           * getting the hash lock.  This will minimize the hash
3504 3503           * lock hold time, nesting, and the like.  If it turns
3505 3504           * out we don't need the page, we put it back at the end.
3506 3505           */
3507 3506          while (npages--) {
3508 3507                  phm = NULL;
3509 3508  
3510 3509                  index = PAGE_HASH_FUNC(vp, off);
3511 3510  top:
3512 3511                  ASSERT(phm == NULL);
3513 3512                  ASSERT(index == PAGE_HASH_FUNC(vp, off));
3514 3513                  ASSERT(MUTEX_NOT_HELD(page_vnode_mutex(vp)));
3515 3514  
3516 3515                  if (npp == NULL) {
3517 3516                          /*
3518 3517                           * Try to get the page of any color either from
3519 3518                           * the freelist or from the cache list.
3520 3519                           */
3521 3520                          npp = page_get_anylist(vp, off, as, vaddr, MMU_PAGESIZE,
3522 3521                              flags & ~PG_MATCH_COLOR, mattr, NULL);
3523 3522                          if (npp == NULL) {
3524 3523                                  if (mattr == NULL) {
3525 3524                                          /*
3526 3525                                           * Not looking for a special page;
3527 3526                                           * panic!
3528 3527                                           */
3529 3528                                          panic("no page found %d", (int)npages);
3530 3529                                  }
3531 3530                                  /*
3532 3531                                   * No page found! This can happen
3533 3532                                   * if we are looking for a page
3534 3533                                   * within a specific memory range
3535 3534                                   * for DMA purposes. If PG_WAIT is
3536 3535                                   * specified then we wait for a
3537 3536                                   * while and then try again. The
3538 3537                                   * wait could be forever if we
3539 3538                                   * don't get the page(s) we need.
3540 3539                                   *
3541 3540                                   * Note: XXX We really need a mechanism
3542 3541                                   * to wait for pages in the desired
3543 3542                                   * range. For now, we wait for any
3544 3543                                   * pages and see if we can use them.
3545 3544                                   */
3546 3545  
3547 3546                                  if ((mattr != NULL) && (flags & PG_WAIT)) {
3548 3547                                          delay(10);
3549 3548                                          goto top;
3550 3549                                  }
3551 3550                                  goto fail; /* undo accounting stuff */
3552 3551                          }
3553 3552  
3554 3553                          if (PP_ISAGED(npp) == 0) {
3555 3554                                  /*
3556 3555                                   * Since this page came from the
3557 3556                                   * cachelist, we must destroy the
3558 3557                                   * old vnode association.
3559 3558                                   */
3560 3559                                  page_hashout(npp, (kmutex_t *)NULL);
3561 3560                          }
3562 3561                  }
3563 3562  
3564 3563                  /*
3565 3564                   * We own this page!
3566 3565                   */
3567 3566                  ASSERT(PAGE_EXCL(npp));
3568 3567                  ASSERT(npp->p_vnode == NULL);
3569 3568                  ASSERT(!hat_page_is_mapped(npp));
3570 3569                  PP_CLRFREE(npp);
3571 3570                  PP_CLRAGED(npp);
3572 3571  
3573 3572                  /*
3574 3573                   * Here we have a page in our hot little mitts and are
3575 3574                   * just waiting to stuff it on the appropriate lists.
3576 3575                   * Get the mutex and check to see if it really does
3577 3576                   * not exist.
3578 3577                   */
3579 3578                  phm = PAGE_HASH_MUTEX(index);
3580 3579                  mutex_enter(phm);
3581 3580                  PAGE_HASH_SEARCH(index, pp, vp, off);
3582 3581                  if (pp == NULL) {
3583 3582                          VM_STAT_ADD(page_create_new);
3584 3583                          pp = npp;
3585 3584                          npp = NULL;
3586 3585                          if (!page_hashin(pp, vp, off, phm)) {
3587 3586                                  /*
3588 3587                                   * Since we hold the page hash mutex and
3589 3588                                   * just searched for this page, page_hashin
3590 3589                                   * had better not fail.  If it does, that
3591 3590                                   * means some thread did not follow the
3592 3591                                   * page hash mutex rules.  Panic now and
3593 3592                                   * get it over with.  As usual, go down
3594 3593                                   * holding all the locks.
3595 3594                                   */
3596 3595                                  ASSERT(MUTEX_HELD(phm));
3597 3596                                  panic("page_create: hashin fail %p %p %llx %p",
3598 3597                                      (void *)pp, (void *)vp, off, (void *)phm);
3599 3598  
3600 3599                          }
3601 3600                          ASSERT(MUTEX_HELD(phm));
3602 3601                          mutex_exit(phm);
3603 3602                          phm = NULL;
3604 3603  
3605 3604                          /*
3606 3605                           * Hat layer locking need not be done to set
3607 3606                           * the following bits since the page is not hashed
3608 3607                           * and was on the free list (i.e., had no mappings).
3609 3608                           *
3610 3609                           * Set the reference bit to protect
3611 3610                           * against immediate pageout
3612 3611                           *
3613 3612                           * XXXmh modify freelist code to set reference
3614 3613                           * bit so we don't have to do it here.
3615 3614                           */
3616 3615                          page_set_props(pp, P_REF);
3617 3616                  } else {
3618 3617                          ASSERT(MUTEX_HELD(phm));
3619 3618                          mutex_exit(phm);
3620 3619                          phm = NULL;
3621 3620                          /*
3622 3621                           * NOTE: This should not happen for pages associated
3623 3622                           *       with kernel vnode 'kvp'.
3624 3623                           */
3625 3624                          /* XX64 - to debug why this happens! */
3626 3625                          ASSERT(!VN_ISKAS(vp));
3627 3626                          if (VN_ISKAS(vp))
3628 3627                                  cmn_err(CE_NOTE,
3629 3628                                      "page_create: page not expected "
3630 3629                                      "in hash list for kernel vnode - pp 0x%p",
3631 3630                                      (void *)pp);
3632 3631                          VM_STAT_ADD(page_create_exists);
3633 3632                          goto fail;
3634 3633                  }
3635 3634  
3636 3635                  /*
3637 3636                   * Got a page!  It is locked.  Acquire the i/o
3638 3637                   * lock since we are going to use the p_next and
3639 3638                   * p_prev fields to link the requested pages together.
3640 3639                   */
3641 3640                  page_io_lock(pp);
3642 3641                  page_add(&plist, pp);
3643 3642                  plist = plist->p_next;
3644 3643                  off += MMU_PAGESIZE;
3645 3644                  vaddr += MMU_PAGESIZE;
3646 3645          }
3647 3646  
3648 3647          check_dma(mattr, plist, pages_req);
3649 3648          return (plist);
3650 3649  
3651 3650  fail:
3652 3651          if (npp != NULL) {
3653 3652                  /*
3654 3653                   * Did not need this page after all.
3655 3654                   * Put it back on the free list.
3656 3655                   */
3657 3656                  VM_STAT_ADD(page_create_putbacks);
3658 3657                  PP_SETFREE(npp);
3659 3658                  PP_SETAGED(npp);
3660 3659                  npp->p_offset = (u_offset_t)-1;
3661 3660                  page_list_add(npp, PG_FREE_LIST | PG_LIST_TAIL);
3662 3661                  page_unlock(npp);
3663 3662          }
3664 3663  
3665 3664          /*
3666 3665           * Give up the pages we already got.
3667 3666           */
3668 3667          while (plist != NULL) {
3669 3668                  pp = plist;
3670 3669                  page_sub(&plist, pp);
3671 3670                  page_io_unlock(pp);
3672 3671                  plist_len++;
3673 3672                  /*LINTED: constant in conditional ctx*/
3674 3673                  VN_DISPOSE(pp, B_INVAL, 0, kcred);
3675 3674          }
3676 3675  
3677 3676          /*
3678 3677           * VN_DISPOSE does freemem accounting for the pages in plist
3679 3678           * by calling page_free. So, we need to undo the pcf accounting
3680 3679           * for only the remaining pages.
3681 3680           */
3682 3681          VM_STAT_ADD(page_create_putbacks);
3683 3682          page_create_putback(pages_req - plist_len);
3684 3683  
3685 3684          return (NULL);
3686 3685  }
3687 3686  #endif /* !__xpv */
3688 3687  
3689 3688  
3690 3689  /*
3691 3690   * Copy the data from the physical page represented by "frompp" to
3692 3691   * that represented by "topp". ppcopy uses CPU->cpu_caddr1 and
3693 3692   * CPU->cpu_caddr2.  It assumes that no one uses either map at interrupt
3694 3693   * level and no one sleeps with an active mapping there.
3695 3694   *
3696 3695   * Note that the ref/mod bits in the page_t's are not affected by
3697 3696   * this operation, hence it is up to the caller to update them appropriately.
3698 3697   */
3699 3698  int
3700 3699  ppcopy(page_t *frompp, page_t *topp)
3701 3700  {
3702 3701          caddr_t         pp_addr1;
3703 3702          caddr_t         pp_addr2;
3704 3703          hat_mempte_t    pte1;
3705 3704          hat_mempte_t    pte2;
3706 3705          kmutex_t        *ppaddr_mutex;
3707 3706          label_t         ljb;
3708 3707          int             ret = 1;
3709 3708  
3710 3709          ASSERT_STACK_ALIGNED();
3711 3710          ASSERT(PAGE_LOCKED(frompp));
3712 3711          ASSERT(PAGE_LOCKED(topp));
3713 3712  
3714 3713          if (kpm_enable) {
3715 3714                  pp_addr1 = hat_kpm_page2va(frompp, 0);
3716 3715                  pp_addr2 = hat_kpm_page2va(topp, 0);
3717 3716                  kpreempt_disable();
3718 3717          } else {
3719 3718                  /*
3720 3719                   * disable preemption so that the CPU can't change
3721 3720                   */
3722 3721                  kpreempt_disable();
3723 3722  
3724 3723                  pp_addr1 = CPU->cpu_caddr1;
3725 3724                  pp_addr2 = CPU->cpu_caddr2;
3726 3725                  pte1 = CPU->cpu_caddr1pte;
3727 3726                  pte2 = CPU->cpu_caddr2pte;
3728 3727  
3729 3728                  ppaddr_mutex = &CPU->cpu_ppaddr_mutex;
3730 3729                  mutex_enter(ppaddr_mutex);
3731 3730  
3732 3731                  hat_mempte_remap(page_pptonum(frompp), pp_addr1, pte1,
3733 3732                      PROT_READ | HAT_STORECACHING_OK, HAT_LOAD_NOCONSIST);
3734 3733                  hat_mempte_remap(page_pptonum(topp), pp_addr2, pte2,
3735 3734                      PROT_READ | PROT_WRITE | HAT_STORECACHING_OK,
3736 3735                      HAT_LOAD_NOCONSIST);
3737 3736          }
3738 3737  
3739 3738          if (on_fault(&ljb)) {
3740 3739                  ret = 0;
3741 3740                  goto faulted;
3742 3741          }
3743 3742          if (use_sse_pagecopy)
3744 3743  #ifdef __xpv
3745 3744                  page_copy_no_xmm(pp_addr2, pp_addr1);
3746 3745  #else
3747 3746                  hwblkpagecopy(pp_addr1, pp_addr2);
3748 3747  #endif
3749 3748          else
3750 3749                  bcopy(pp_addr1, pp_addr2, PAGESIZE);
3751 3750  
3752 3751          no_fault();
3753 3752  faulted:
3754 3753          if (!kpm_enable) {
3755 3754  #ifdef __xpv
3756 3755                  /*
3757 3756                   * We can't leave unused mappings lying about under the
3758 3757                   * hypervisor, so blow them away.
3759 3758                   */
3760 3759                  if (HYPERVISOR_update_va_mapping((uintptr_t)pp_addr1, 0,
3761 3760                      UVMF_INVLPG | UVMF_LOCAL) < 0)
3762 3761                          panic("HYPERVISOR_update_va_mapping() failed");
3763 3762                  if (HYPERVISOR_update_va_mapping((uintptr_t)pp_addr2, 0,
3764 3763                      UVMF_INVLPG | UVMF_LOCAL) < 0)
3765 3764                          panic("HYPERVISOR_update_va_mapping() failed");
3766 3765  #endif
3767 3766                  mutex_exit(ppaddr_mutex);
3768 3767          }
3769 3768          kpreempt_enable();
3770 3769          return (ret);
3771 3770  }
3772 3771  
3773 3772  void
3774 3773  pagezero(page_t *pp, uint_t off, uint_t len)
3775 3774  {
3776 3775          ASSERT(PAGE_LOCKED(pp));
3777 3776          pfnzero(page_pptonum(pp), off, len);
3778 3777  }
3779 3778  
3780 3779  /*
3781 3780   * Zero the physical page from off to off + len given by pfn
3782 3781   * without changing the reference and modified bits of page.
3783 3782   *
3784 3783   * We do this using CPU private page address #2; see ppcopy() for more info.
3785 3784   * pfnzero() must not be called at interrupt level.
3786 3785   */
3787 3786  void
3788 3787  pfnzero(pfn_t pfn, uint_t off, uint_t len)
3789 3788  {
3790 3789          caddr_t         pp_addr2;
3791 3790          hat_mempte_t    pte2;
3792 3791          kmutex_t        *ppaddr_mutex = NULL;
3793 3792  
3794 3793          ASSERT_STACK_ALIGNED();
3795 3794          ASSERT(len <= MMU_PAGESIZE);
3796 3795          ASSERT(off <= MMU_PAGESIZE);
3797 3796          ASSERT(off + len <= MMU_PAGESIZE);
3798 3797  
3799 3798          if (kpm_enable && !pfn_is_foreign(pfn)) {
3800 3799                  pp_addr2 = hat_kpm_pfn2va(pfn);
3801 3800                  kpreempt_disable();
3802 3801          } else {
3803 3802                  kpreempt_disable();
3804 3803  
3805 3804                  pp_addr2 = CPU->cpu_caddr2;
3806 3805                  pte2 = CPU->cpu_caddr2pte;
3807 3806  
3808 3807                  ppaddr_mutex = &CPU->cpu_ppaddr_mutex;
3809 3808                  mutex_enter(ppaddr_mutex);
3810 3809  
3811 3810                  hat_mempte_remap(pfn, pp_addr2, pte2,
3812 3811                      PROT_READ | PROT_WRITE | HAT_STORECACHING_OK,
3813 3812                      HAT_LOAD_NOCONSIST);
3814 3813          }
3815 3814  
3816 3815          if (use_sse_pagezero) {
3817 3816  #ifdef __xpv
3818 3817                  uint_t rem;
3819 3818  
3820 3819                  /*
3821 3820                   * zero a byte at a time until properly aligned for
3822 3821                   * block_zero_no_xmm().
3823 3822                   */
3824 3823                  while (!P2NPHASE(off, ((uint_t)BLOCKZEROALIGN)) && len-- > 0)
3825 3824                          pp_addr2[off++] = 0;
3826 3825  
3827 3826                  /*
3828 3827                   * Now use faster block_zero_no_xmm() for any range
3829 3828                   * that is properly aligned and sized.
3830 3829                   */
3831 3830                  rem = P2PHASE(len, ((uint_t)BLOCKZEROALIGN));
3832 3831                  len -= rem;
3833 3832                  if (len != 0) {
3834 3833                          block_zero_no_xmm(pp_addr2 + off, len);
3835 3834                          off += len;
3836 3835                  }
3837 3836  
3838 3837                  /*
3839 3838                   * zero remainder with byte stores.
3840 3839                   */
3841 3840                  while (rem-- > 0)
3842 3841                          pp_addr2[off++] = 0;
3843 3842  #else
3844 3843                  hwblkclr(pp_addr2 + off, len);
3845 3844  #endif
3846 3845          } else {
3847 3846                  bzero(pp_addr2 + off, len);
3848 3847          }
3849 3848  
3850 3849          if (!kpm_enable || pfn_is_foreign(pfn)) {
3851 3850  #ifdef __xpv
3852 3851                  /*
3853 3852                   * On the hypervisor this page might get used for a page
3854 3853                   * table before any intervening change to this mapping,
3855 3854                   * so blow it away.
3856 3855                   */
3857 3856                  if (HYPERVISOR_update_va_mapping((uintptr_t)pp_addr2, 0,
3858 3857                      UVMF_INVLPG) < 0)
3859 3858                          panic("HYPERVISOR_update_va_mapping() failed");
3860 3859  #endif
3861 3860                  mutex_exit(ppaddr_mutex);
3862 3861          }
3863 3862  
3864 3863          kpreempt_enable();
3865 3864  }
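
In the hypervisor (__xpv) branch above, pfnzero() cannot hand an arbitrary offset to the block zeroer, so, as the comments describe, the range is split into a byte-wise head up to the next BLOCKZEROALIGN boundary, an aligned middle for block_zero_no_xmm(), and a byte-wise tail, with P2PHASE()/P2NPHASE() doing the power-of-two arithmetic. The standalone sketch below shows that head/body/tail split with the macros re-declared locally (the kernel takes them from <sys/sysmacros.h>) and memset() standing in for block_zero_no_xmm(); ZERO_ALIGN and zero_range() are illustrative names, not kernel symbols.

#include <assert.h>
#include <stdio.h>
#include <string.h>

/*
 * Local copies of the power-of-two phase macros; the kernel's versions
 * come from <sys/sysmacros.h>.  "align" must be a power of two.
 */
#define	P2PHASE(x, align)	((x) & ((align) - 1))
#define	P2NPHASE(x, align)	(-(x) & ((align) - 1))

#define	ZERO_ALIGN	((size_t)64)	/* illustrative stand-in for BLOCKZEROALIGN */

static void
block_zero(unsigned char *p, size_t len)
{
	/* Stand-in for block_zero_no_xmm(): only takes aligned lengths. */
	assert(P2PHASE(len, ZERO_ALIGN) == 0);
	(void) memset(p, 0, len);
}

static void
zero_range(unsigned char *buf, size_t off, size_t len)
{
	size_t head, body, tail;

	/* Byte stores up to the next ZERO_ALIGN boundary. */
	head = P2NPHASE(off, ZERO_ALIGN);
	if (head > len)
		head = len;
	while (head-- > 0 && len-- > 0)
		buf[off++] = 0;

	/* Aligned middle in one block call, then a byte-wise tail. */
	tail = P2PHASE(len, ZERO_ALIGN);
	body = len - tail;
	if (body != 0) {
		block_zero(buf + off, body);
		off += body;
	}
	while (tail-- > 0)
		buf[off++] = 0;
}

int
main(void)
{
	unsigned char page[4096];

	(void) memset(page, 0xff, sizeof (page));
	zero_range(page, 13, 1000);	/* zeroes bytes 13 .. 1012 */
	(void) printf("page[12]=%d page[13]=%d page[1012]=%d page[1013]=%d\n",
	    page[12], page[13], page[1012], page[1013]);
	return (0);
}

Running it should show the bytes just inside the range zeroed and the bytes just outside untouched, which is the invariant the kernel code relies on when zeroing sub-page ranges.
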
3866 3865  
3867 3866  /*
3868 3867   * Platform-dependent page scrub call.
3869 3868   */
3870 3869  void
3871 3870  pagescrub(page_t *pp, uint_t off, uint_t len)
3872 3871  {
3873 3872          /*
3874 3873           * For now, we rely on the fact that pagezero() will
3875 3874           * always clear UEs.
3876 3875           */
3877 3876          pagezero(pp, off, len);
3878 3877  }
3879 3878  
3880 3879  /*
3881 3880   * set up two private addresses on a given CPU for use in ppcopy()
3882 3881   */
3883 3882  void
3884 3883  setup_vaddr_for_ppcopy(struct cpu *cpup)
3885 3884  {
3886 3885          void *addr;
3887 3886          hat_mempte_t pte_pa;
3888 3887  
3889 3888          addr = vmem_alloc(heap_arena, mmu_ptob(1), VM_SLEEP);
3890 3889          pte_pa = hat_mempte_setup(addr);
3891 3890          cpup->cpu_caddr1 = addr;
3892 3891          cpup->cpu_caddr1pte = pte_pa;
3893 3892  
3894 3893          addr = vmem_alloc(heap_arena, mmu_ptob(1), VM_SLEEP);
3895 3894          pte_pa = hat_mempte_setup(addr);
3896 3895          cpup->cpu_caddr2 = addr;
3897 3896          cpup->cpu_caddr2pte = pte_pa;
3898 3897  
3899 3898          mutex_init(&cpup->cpu_ppaddr_mutex, NULL, MUTEX_DEFAULT, NULL);
3900 3899  }
3901 3900  
3902 3901  /*
3903 3902   * Undo setup_vaddr_for_ppcopy
3904 3903   */
3905 3904  void
3906 3905  teardown_vaddr_for_ppcopy(struct cpu *cpup)
3907 3906  {
3908 3907          mutex_destroy(&cpup->cpu_ppaddr_mutex);
3909 3908  
3910 3909          hat_mempte_release(cpup->cpu_caddr2, cpup->cpu_caddr2pte);
3911 3910          cpup->cpu_caddr2pte = 0;
3912 3911          vmem_free(heap_arena, cpup->cpu_caddr2, mmu_ptob(1));
3913 3912          cpup->cpu_caddr2 = 0;
3914 3913  
3915 3914          hat_mempte_release(cpup->cpu_caddr1, cpup->cpu_caddr1pte);
3916 3915          cpup->cpu_caddr1pte = 0;
3917 3916          vmem_free(heap_arena, cpup->cpu_caddr1, mmu_ptob(1));
3918 3917          cpup->cpu_caddr1 = 0;
3919 3918  }
3920 3919  
3921 3920  /*
3922 3921   * Function for flushing D-cache when performing module relocations
3923 3922   * to an alternate mapping.  Unnecessary on Intel / AMD platforms.
3924 3923   */
3925 3924  void
3926 3925  dcache_flushall()
3927 3926  {}
3928 3927  
3929 3928  size_t
3930 3929  exec_get_spslew(void)
3931 3930  {
3932 3931          return (0);
3933 3932  }
3934 3933  
3935 3934  /*
3936 3935   * Allocate a memory page.  The argument 'seed' can be any pseudo-random
3937 3936   * number to vary where the pages come from.  This is quite a hacked up
3938 3937   * method -- it works for now, but really needs to be fixed up a bit.
3939 3938   *
3940 3939   * We currently use page_create_va() on the kvp with fake offsets,
3941 3940   * segments and virt address.  This is pretty bogus, but was copied from the
3942 3941   * old hat_i86.c code.  A better approach would be to specify either mnode
3943 3942   * random or mnode local and take a page from whatever color has the MOST
3944 3943   * available - this would have a minimal impact on page coloring.
3945 3944   */
3946 3945  page_t *
3947 3946  page_get_physical(uintptr_t seed)
3948 3947  {
3949 3948          page_t *pp;
3950 3949          u_offset_t offset;
3951 3950          static struct seg tmpseg;
3952 3951          static uintptr_t ctr = 0;
3953 3952  
3954 3953          /*
3955 3954           * This code is gross, we really need a simpler page allocator.
3956 3955           *
3957 3956   * We need to assign an offset for the page to call page_create_va().
3958 3957   * To avoid conflicts with other pages, we get creative with the offset.
3959 3958   * For 32 bits, we need an offset > 4 Gig.
3960 3959   * For 64 bits, we need an offset somewhere in the VA hole.
3961 3960           */
3962 3961          offset = seed;
3963 3962          if (offset > kernelbase)
3964 3963                  offset -= kernelbase;
3965 3964          offset <<= MMU_PAGESHIFT;
3966 3965  #if defined(__amd64)
3967 3966          offset += mmu.hole_start;       /* something in VA hole */
3968 3967  #else
3969 3968          offset += 1ULL << 40;   /* something > 4 Gig */
3970 3969  #endif
3971 3970  
3972 3971          if (page_resv(1, KM_NOSLEEP) == 0)
3973 3972                  return (NULL);
3974 3973  
3975 3974  #ifdef  DEBUG
3976 3975          pp = page_exists(&kvp, offset);
3977 3976          if (pp != NULL)
3978 3977                  panic("page already exists %p", (void *)pp);
3979 3978  #endif
3980 3979  
3981 3980          pp = page_create_va(&kvp, offset, MMU_PAGESIZE, PG_EXCL,
3982 3981              &tmpseg, (caddr_t)(ctr += MMU_PAGESIZE));   /* changing VA usage */
3983 3982          if (pp != NULL) {
3984 3983                  page_io_unlock(pp);
3985 3984                  page_downgrade(pp);
3986 3985          }
3987 3986          return (pp);
3988 3987  }
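
page_create_va() identifies a page by a (vnode, offset) pair, so the routine above manufactures a kvp offset from the seed that cannot collide with offsets in real use: large (kernel-address) seeds are rebased by kernelbase, the result is scaled by the page size, and it is then pushed above 4 Gig (32-bit) or into the amd64 VA hole. A standalone sketch of the 64-bit arithmetic follows; EX_PAGESHIFT, EX_KERNELBASE and EX_HOLE_START are illustrative constants, not the values the kernel computes at boot.

#include <inttypes.h>
#include <stdio.h>

/*
 * Illustrative constants only -- in the kernel, kernelbase and
 * mmu.hole_start are determined at boot time.
 */
#define	EX_PAGESHIFT	12			/* 4K pages, like MMU_PAGESHIFT */
#define	EX_KERNELBASE	0xfffffd8000000000ULL	/* example kernelbase */
#define	EX_HOLE_START	0x0000800000000000ULL	/* example VA hole start */

/* Mirrors the offset cooking in the 64-bit branch of page_get_physical(). */
static uint64_t
fake_kvp_offset(uint64_t seed)
{
	uint64_t offset = seed;

	if (offset > EX_KERNELBASE)
		offset -= EX_KERNELBASE;	/* rebase large (kernel VA) seeds */
	offset <<= EX_PAGESHIFT;		/* one page of offset per seed value */
	offset += EX_HOLE_START;		/* land the offset in the VA hole */
	return (offset);
}

int
main(void)
{
	(void) printf("seed 1     -> offset 0x%" PRIx64 "\n", fake_kvp_offset(1));
	(void) printf("seed 0x2a  -> offset 0x%" PRIx64 "\n", fake_kvp_offset(0x2a));
	(void) printf("kernel VA  -> offset 0x%" PRIx64 "\n",
	    fake_kvp_offset(EX_KERNELBASE + 0x1234));
	return (0);
}

Because consecutive seeds land one page apart and the base sits in an address range no file offset on the kvp will ever use, the DEBUG page_exists() check above should never fire in practice.
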
  
(3349 lines elided)