/*
 * CDDL HEADER START
 *
 * The contents of this file are subject to the terms of the
 * Common Development and Distribution License (the "License").
 * You may not use this file except in compliance with the License.
 *
 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
 * or http://www.opensolaris.org/os/licensing.
 * See the License for the specific language governing permissions
 * and limitations under the License.
 *
 * When distributing Covered Code, include this CDDL HEADER in each
 * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
 * If applicable, add the following below this CDDL HEADER, with the
 * fields enclosed by brackets "[]" replaced with your own identifying
 * information: Portions Copyright [yyyy] [name of copyright owner]
 *
 * CDDL HEADER END
 */
/*
 * Copyright 2009 Sun Microsystems, Inc.  All rights reserved.
 * Use is subject to license terms.
 * Copyright 2014 Joyent, Inc.  All rights reserved.
 */

/*      Copyright (c) 1984, 1986, 1987, 1988, 1989 AT&T     */
/*        All Rights Reserved   */

/*
 * University Copyright- Copyright (c) 1982, 1986, 1988
 * The Regents of the University of California
 * All Rights Reserved
 *
 * University Acknowledgment- Portions of this document are derived from
 * software developed by the University of California, Berkeley, and its
 * contributors.
 */

#ifndef _VM_HAT_H
#define _VM_HAT_H

#include <sys/types.h>
#include <sys/t_lock.h>
#include <vm/faultcode.h>
#include <sys/kstat.h>
#include <sys/siginfo.h>

#ifdef  __cplusplus
extern "C" {
#endif

/*
 * VM - Hardware Address Translation management.
 *
 * This file describes the machine independent interfaces to
 * the hardware address translation management routines.  Other
 * machine specific interfaces and structures are defined
 * in <vm/hat_xxx.h>.  The hat layer manages the address
 * translation hardware as a cache driven by calls from the
 * higher levels of the VM system.
 */

struct hat;
struct kpme;
struct memseg;

#include <vm/page.h>

/*
 * A callback used with hat_unload_callback().  hcb_start_addr and
 * hcb_end_addr are set to the range of unloaded addresses, and
 * hcb_function is invoked with a pointer to this structure.
 */
typedef struct hat_callback {
        caddr_t hcb_start_addr;
        caddr_t hcb_end_addr;
        void    (*hcb_function)(struct hat_callback *);
        void    *hcb_data;
} hat_callback_t;

typedef void *hat_region_cookie_t;

#ifdef  _KERNEL

/*
 * One time hat initialization
 */
void    hat_init(void);

/*
 * Notify hat of a system dump
 */
void    hat_dump(void);

/*
 * Operations on an address space:
 *
 * struct hat *hat_alloc(as)
 *      allocates a hat structure for as.
 *
 * void hat_free_start(hat)
 *      informs the hat layer that the process has finished executing but
 *      the as has not been cleaned up yet.
 *
 * void hat_free_end(hat)
 *      informs the hat layer that the as is being destroyed.  The hat layer
 *      cannot use the as pointer after this call.
 *
 * void hat_swapin(hat)
 *      allocate any hat resources required for process being swapped in.
 *
 * void hat_swapout(hat)
 *      deallocate hat resources for process being swapped out.
 *
 * size_t hat_get_mapped_size(hat)
 *      returns number of bytes that have valid mappings in hat.
 *
 * int hat_stats_enable(hat)
 * void hat_stats_disable(hat)
 *      enables/disables collection of stats for hat.
 *
 * int hat_dup(parenthat, childhat, addr, len, flags)
 *      Duplicate address translations of the parent to the child.  Supports
 *      the entire address range or a sub-range, depending on flags.
 *      Returns zero on success, non-zero on error.
 *
 * void hat_thread_exit(thread)
 *      Notifies the HAT that a thread is exiting, called after it has been
 *      reassigned to the kernel AS.
 */

struct hat *hat_alloc(struct as *);
void    hat_free_start(struct hat *);
void    hat_free_end(struct hat *);
int     hat_dup(struct hat *, struct hat *, caddr_t, size_t, uint_t);
void    hat_swapin(struct hat *);
void    hat_swapout(struct hat *);
size_t  hat_get_mapped_size(struct hat *);
int     hat_stats_enable(struct hat *);
void    hat_stats_disable(struct hat *);
void    hat_thread_exit(kthread_t *);

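/*
 * Usage sketch (illustrative only, not part of this interface): the
 * address space layer pairs these calls roughly as follows, assuming
 * `as' is an address space being created and later torn down:
 *
 *      as->a_hat = hat_alloc(as);      -- when the as is created
 *      ...
 *      hat_free_start(as->a_hat);      -- the process has exited
 *      ...                             -- segments are unmapped
 *      hat_free_end(as->a_hat);        -- hat may no longer use `as'
 */
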
/*
 * Operations on a named address within a segment:
 *
 * void hat_memload(hat, addr, pp, attr, flags)
 *      load/lock the given page struct
 *
 * void hat_memload_array(hat, addr, len, ppa, attr, flags)
 *      load/lock the given array of page structs
 *
 * void hat_devload(hat, addr, len, pf, attr, flags)
 *      load/lock the given page frame number
 *
 * void hat_unlock(hat, addr, len)
 *      unlock a given range of addresses
 *
 * void hat_unload(hat, addr, len, flags)
 * void hat_unload_callback(hat, addr, len, flags, callback)
 *      unload a given range of addresses (has optional callback)
 *
 * void hat_sync(hat, addr, len, flags)
 *      synchronize mapping with software data structures
 *
 * void hat_map(hat, addr, len, flags)
 *
 * void hat_setattr(hat, addr, len, attr)
 * void hat_clrattr(hat, addr, len, attr)
 * void hat_chgattr(hat, addr, len, attr)
 *      modify attributes for a range of addresses. skips any invalid mappings
 *
 * uint_t hat_getattr(hat, addr, *attr)
 *      returns attr for <hat,addr> in *attr.  returns 0 if there was a
 *      mapping and *attr is valid, nonzero if there was no mapping and
 *      *attr is not valid.
 *
 * ssize_t hat_getpagesize(hat, addr)
 *      returns pagesize in bytes for <hat, addr>. returns -1 if there is
 *      no mapping. This is an advisory call.
 *
 * pfn_t hat_getpfnum(hat, addr)
 *      returns pfn for <hat, addr> or PFN_INVALID if mapping is invalid.
 *
 * int hat_probe(hat, addr)
 *      returns 0 if no valid mapping is present.  Faster version
 *      of hat_getattr() on certain architectures.
 *
 * int hat_share(dhat, daddr, shat, saddr, len, szc)
 *
 * void hat_unshare(hat, addr, len, szc)
 *
 * void hat_chgprot(hat, addr, len, vprot)
 *      This is a deprecated call.  New segment drivers should store
 *      all attributes and use the hat_*attr calls.
 *      Change the protections in the virtual address range
 *      given to the specified virtual protection.  If vprot is ~PROT_WRITE,
 *      then remove write permission, leaving the other permissions
 *      unchanged.  If vprot is ~PROT_USER, remove user permissions.
 *
 * void hat_flush_range(hat, addr, size)
 *      Invalidate the virtual address translations in the given range for
 *      the local CPU.
 */

void    hat_memload(struct hat *, caddr_t, struct page *, uint_t, uint_t);
void    hat_memload_array(struct hat *, caddr_t, size_t, struct page **,
                uint_t, uint_t);
void    hat_memload_region(struct hat *, caddr_t, struct page *, uint_t,
                uint_t, hat_region_cookie_t);
void    hat_memload_array_region(struct hat *, caddr_t, size_t, struct page **,
                uint_t, uint_t, hat_region_cookie_t);

void    hat_devload(struct hat *, caddr_t, size_t, pfn_t, uint_t, int);

void    hat_unlock(struct hat *, caddr_t, size_t);
void    hat_unlock_region(struct hat *, caddr_t, size_t, hat_region_cookie_t);

void    hat_unload(struct hat *, caddr_t, size_t, uint_t);
void    hat_unload_callback(struct hat *, caddr_t, size_t, uint_t,
                hat_callback_t *);
void    hat_flush_range(struct hat *, caddr_t, size_t);
void    hat_sync(struct hat *, caddr_t, size_t, uint_t);
void    hat_map(struct hat *, caddr_t, size_t, uint_t);
void    hat_setattr(struct hat *, caddr_t, size_t, uint_t);
void    hat_clrattr(struct hat *, caddr_t, size_t, uint_t);
void    hat_chgattr(struct hat *, caddr_t, size_t, uint_t);
uint_t  hat_getattr(struct hat *, caddr_t, uint_t *);
ssize_t hat_getpagesize(struct hat *, caddr_t);
pfn_t   hat_getpfnum(struct hat *, caddr_t);
int     hat_probe(struct hat *, caddr_t);
int     hat_share(struct hat *, caddr_t, struct hat *, caddr_t, size_t, uint_t);
void    hat_unshare(struct hat *, caddr_t, size_t, uint_t);
void    hat_chgprot(struct hat *, caddr_t, size_t, uint_t);
void    hat_reserve(struct as *, caddr_t, size_t);
pfn_t   va_to_pfn(void *);
uint64_t va_to_pa(void *);

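/*
 * Usage sketch (illustrative only): a segment driver that has resolved a
 * fault to page `pp' might load a translation and later query and tear it
 * down; `hat', `addr' and `pp' are assumed to come from the caller:
 *
 *      hat_memload(hat, addr, pp, PROT_READ | PROT_WRITE, HAT_LOAD);
 *
 *      pfn_t pfn = hat_getpfnum(hat, addr);
 *      if (pfn == PFN_INVALID)
 *              -- the translation is not (or is no longer) valid
 *
 *      hat_unload(hat, addr, PAGESIZE, HAT_UNLOAD);
 */
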
/*
 * Kernel Physical Mapping (segkpm) hat interface routines.
 */
caddr_t hat_kpm_mapin(struct page *, struct kpme *);
void    hat_kpm_mapout(struct page *, struct kpme *, caddr_t);
caddr_t hat_kpm_mapin_pfn(pfn_t);
void    hat_kpm_mapout_pfn(pfn_t);
caddr_t hat_kpm_page2va(struct page *, int);
struct page *hat_kpm_vaddr2page(caddr_t);
int     hat_kpm_fault(struct hat *, caddr_t);
void    hat_kpm_mseghash_clear(int);
void    hat_kpm_mseghash_update(pgcnt_t, struct memseg *);
void    hat_kpm_addmem_mseg_update(struct memseg *, pgcnt_t, offset_t);
void    hat_kpm_addmem_mseg_insert(struct memseg *);
void    hat_kpm_addmem_memsegs_update(struct memseg *);
caddr_t hat_kpm_mseg_reuse(struct memseg *);
void    hat_kpm_delmem_mseg_update(struct memseg *, struct memseg **);
void    hat_kpm_split_mseg_update(struct memseg *, struct memseg **,
                        struct memseg *, struct memseg *, struct memseg *);
void    hat_kpm_walk(void (*)(void *, void *, size_t), void *);

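/*
 * Usage sketch (illustrative only): short-lived kernel access to a page's
 * contents through segkpm, assuming `pp' is a suitably locked page and the
 * platform supports kpm; a NULL kpme is assumed acceptable for transient use:
 *
 *      caddr_t va = hat_kpm_mapin(pp, NULL);
 *      ... access the page contents through va ...
 *      hat_kpm_mapout(pp, NULL, va);
 */
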
/*
 * Operations on all translations for a given page(s)
 *
 * void hat_page_setattr(pp, flag)
 * void hat_page_clrattr(pp, flag)
 *      used to set/clr ref/mod bits.
 *
 * uint_t hat_page_getattr(pp, flag)
 *      If flag is specified, returns 0 if attribute is disabled
 *      and non zero if enabled.  If flag specifies multiple attributes
 *      then returns 0 if ALL attributes are disabled.  This is an advisory
 *      call.
 *
 * int hat_pageunload(pp, forceflag)
 *      Unload all translations attached to pp. On x86 the bulk of the work is
 *      done by hat_page_inval.
 *
 * void hat_page_inval(pp, pgsz, curhat)
 *      Unload translations attached to pp. If curhat is provided, only the
 *      translation for that process is unloaded, otherwise all are unloaded.
 *
 * uint_t hat_pagesync(pp, flags)
 *      get hw stats from hardware into page struct and reset hw stats
 *      returns attributes of page
 *
 * ulong_t hat_page_getshare(pp)
 *      returns approx number of mappings to this pp.  A return of 0 implies
 *      there are no mappings to the page.
 *
 * faultcode_t hat_softlock(hat, addr, lenp, ppp, flags);
 *      called to softlock pages for zero copy tcp
 *
 * void hat_page_demote(pp);
 *      unload all large mappings to pp and decrease p_szc of all
 *      constituent pages according to the remaining mappings.
 */

void    hat_page_setattr(struct page *, uint_t);
void    hat_page_clrattr(struct page *, uint_t);
uint_t  hat_page_getattr(struct page *, uint_t);
int     hat_pageunload(struct page *, uint_t);
void    hat_page_inval(struct page *, uint_t, struct hat *);
uint_t  hat_pagesync(struct page *, uint_t);
ulong_t hat_page_getshare(struct page *);
int     hat_page_checkshare(struct page *, ulong_t);
faultcode_t hat_softlock(struct hat *, caddr_t, size_t *,
                        struct page **, uint_t);
void    hat_page_demote(struct page *);

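/*
 * Usage sketch (illustrative only): before freeing or relocating a page a
 * caller typically severs all of its translations and checks the resulting
 * state, assuming `pp' is exclusively locked:
 *
 *      (void) hat_pageunload(pp, HAT_FORCE_PGUNLOAD);
 *      ASSERT(hat_page_getshare(pp) == 0);
 *
 *      uint_t attr = hat_pagesync(pp, HAT_SYNC_DONTZERO);
 *      if (attr & P_MOD)
 *              -- the page is dirty and must be written back first
 */
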
/*
 * Routine to expose supported HAT features to PIM.
 */
enum hat_features {
        HAT_SHARED_PT,          /* Shared page tables */
        HAT_DYNAMIC_ISM_UNMAP,  /* hat_pageunload() handles ISM pages */
        HAT_VMODSORT,           /* support for VMODSORT flag of vnode */
        HAT_SHARED_REGIONS      /* shared regions support */
};

int hat_supported(enum hat_features, void *);

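/*
 * Usage sketch (illustrative only): callers query optional features before
 * relying on them; the second argument is feature-specific and may be NULL:
 *
 *      if (hat_supported(HAT_DYNAMIC_ISM_UNMAP, NULL)) {
 *              -- hat_pageunload() can be used on ISM-mapped pages here
 *      }
 */
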
/*
 * Services provided to the hat:
 *
 * void as_signal_proc(as, siginfo)
 *      deliver signal to all processes that have this as.
 *
 * void hat_setstat(as, addr, len, rmbits)
 *      informs the hatstat layer that ref/mod bits need to be updated for
 *      the given address range.
 */
void    as_signal_proc(struct as *, k_siginfo_t *siginfo);
void    hat_setstat(struct as *, caddr_t, size_t, uint_t);

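/*
 * Usage sketch (illustrative only): a hat implementation that is discarding
 * a translation might push the harvested hardware ref/mod state up to the
 * hatstat layer; `as', `vaddr' and `rmbits' are assumed to be the
 * implementation's own state, with rmbits built from P_REF/P_MOD:
 *
 *      hat_setstat(as, vaddr, MMU_PAGESIZE, rmbits & (P_REF | P_MOD));
 */
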
/*
 * Flags to pass to hat routines.
 *
 * Certain flags only apply to some interfaces:
 *
 *      HAT_LOAD        Default flags to load a translation to the page.
 *      HAT_LOAD_LOCK   Lock down mapping resources; hat_map(), hat_memload(),
 *                      and hat_devload().
 *      HAT_LOAD_ADV    Advisory load - Load translation if and only if
 *                      sufficient MMU resources exist (i.e., do not steal).
 *      HAT_LOAD_SHARE  A flag to hat_memload() to indicate that the h/w page
 *                      tables mapping some user pages (not kas) are shared by
 *                      more than one process (e.g., ISM).
 *      HAT_LOAD_CONTIG Pages are contiguous.
 *      HAT_LOAD_NOCONSIST Do not add mapping to mapping list.
 *      HAT_LOAD_REMAP  Reload a valid pte with a different page frame.
 *      HAT_RELOAD_SHARE Reload a shared page table entry. Some platforms
 *                       may require different actions than on the first
 *                       load of a shared mapping.
 *      HAT_NO_KALLOC   Do not kmem_alloc while creating the mapping; at this
 *                      point the mapping being set up is for internal hat
 *                      layer data structures.  This flag forces the hat layer
 *                      to tap its reserves in order to prevent infinite
 *                      recursion.
 *      HAT_LOAD_TEXT   A flag to hat_memload() to indicate loading text pages.
 */

/*
 * Flags for hat_memload/hat_devload
 */
#define HAT_FLAGS_RESV          0xFF000000      /* resv for hat impl */
#define HAT_LOAD                0x00
#define HAT_LOAD_LOCK           0x01
#define HAT_LOAD_ADV            0x04
#define HAT_LOAD_CONTIG         0x10
#define HAT_LOAD_NOCONSIST      0x20
#define HAT_LOAD_SHARE          0x40
#define HAT_LOAD_REMAP          0x80
#define HAT_RELOAD_SHARE        0x100
#define HAT_NO_KALLOC           0x200
#define HAT_LOAD_TEXT           0x400

/*
 * Flags for initializing disable_*large_pages.
 *
 *      HAT_AUTO_TEXT   Get MMU specific disable_auto_text_large_pages
 *      HAT_AUTO_DATA   Get MMU specific disable_auto_data_large_pages
 */
#define HAT_AUTO_TEXT           0x800
#define HAT_AUTO_DATA           0x1000

/*
 * Attributes for hat_memload/hat_devload/hat_*attr
 * are a superset of prot flags defined in mman.h.
 */
#define HAT_PLAT_ATTR_MASK      0xF00000
#define HAT_PROT_MASK           0x0F

#define HAT_NOFAULT             0x10
#define HAT_NOSYNC              0x20

/*
 * Advisory ordering attributes. Apply only to device mappings.
 *
 * HAT_STRICTORDER: the CPU must issue the references in order, as the
 *      programmer specified.  This is the default.
 * HAT_UNORDERED_OK: the CPU may reorder the references (this is all kinds
 *      of reordering; store or load with store or load).
 * HAT_MERGING_OK: merging and batching: the CPU may merge individual stores
 *      to consecutive locations (for example, turn two consecutive byte
 *      stores into one halfword store), and it may batch individual loads
 *      (for example, turn two consecutive byte loads into one halfword load).
 *      This also implies re-ordering.
 * HAT_LOADCACHING_OK: the CPU may cache the data it fetches and reuse it
 *      until another store occurs.  The default is to fetch new data
 *      on every load.  This also implies merging.
 * HAT_STORECACHING_OK: the CPU may keep the data in the cache and push it to
 *      the device (perhaps with other data) at a later time.  The default is
 *      to push the data right away.  This also implies load caching.
 */
#define HAT_STRICTORDER         0x0000
#define HAT_UNORDERED_OK        0x0100
#define HAT_MERGING_OK          0x0200
#define HAT_LOADCACHING_OK      0x0300
#define HAT_STORECACHING_OK     0x0400
#define HAT_ORDER_MASK          0x0700

/* endian attributes */
#define HAT_NEVERSWAP           0x0000
#define HAT_STRUCTURE_BE        0x1000
#define HAT_STRUCTURE_LE        0x2000
#define HAT_ENDIAN_MASK         0x3000

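/*
 * Usage sketch (illustrative only): a driver mapping device registers might
 * combine protection, ordering and endian attributes in the attr argument of
 * hat_devload(), e.g. for a strictly ordered, little-endian device.  Here
 * kas.a_hat is the kernel address space's hat, and `kva', `len' and `pfn'
 * are assumed to describe the device pages:
 *
 *      hat_devload(kas.a_hat, kva, len, pfn,
 *          PROT_READ | PROT_WRITE | HAT_STRICTORDER | HAT_STRUCTURE_LE,
 *          HAT_LOAD_LOCK);
 */
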
/* flags for hat_softlock */
#define HAT_COW                 0x0001

/*
 * Flags for hat_unload
 */
#define HAT_UNLOAD              0x00
#define HAT_UNLOAD_NOSYNC       0x02
#define HAT_UNLOAD_UNLOCK       0x04
#define HAT_UNLOAD_OTHER        0x08
#define HAT_UNLOAD_UNMAP        0x10

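/*
 * Usage sketch (illustrative only): translations established with
 * HAT_LOAD_LOCK are expected to be removed with HAT_UNLOAD_UNLOCK so that
 * the locked mapping resources are released; `hat', `addr' and `pp' are
 * assumed to be the caller's:
 *
 *      hat_memload(hat, addr, pp, PROT_READ, HAT_LOAD_LOCK);
 *      ...
 *      hat_unload(hat, addr, PAGESIZE, HAT_UNLOAD_UNLOCK);
 */
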
/*
 * Flags for hat_pagesync, hat_getstat, hat_sync
 */
#define HAT_SYNC_DONTZERO       0x00
#define HAT_SYNC_ZERORM         0x01
/* Additional flags for hat_pagesync */
#define HAT_SYNC_STOPON_REF     0x02
#define HAT_SYNC_STOPON_MOD     0x04
#define HAT_SYNC_STOPON_RM      (HAT_SYNC_STOPON_REF | HAT_SYNC_STOPON_MOD)
#define HAT_SYNC_STOPON_SHARED  0x08

/*
 * Flags for hat_dup
 *
 * HAT_DUP_ALL dup entire address space
 * HAT_DUP_COW dup plus hat_clrattr(..PROT_WRITE) on newas
 */
#define HAT_DUP_ALL             1
#define HAT_DUP_COW             2
#define HAT_DUP_SRD             3

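/*
 * Usage sketch (illustrative only): during fork the new address space's
 * translations may be primed from the parent; `as' and `newas' are assumed
 * to be the parent and child address spaces:
 *
 *      if (hat_dup(as->a_hat, newas->a_hat, NULL, 0, HAT_DUP_ALL) != 0)
 *              -- the child's hat could not be pre-populated; translations
 *              -- will instead be established on demand
 */
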

/*
 * Flags for hat_map
 */
#define HAT_MAP                 0x00

/*
 * Flags for hat_pageunload
 */
#define HAT_ADV_PGUNLOAD        0x00
#define HAT_FORCE_PGUNLOAD      0x01
#define HAT_CURPROC_PGUNLOAD    0x02

/*
 * Attributes for hat_page_*attr, hat_setstat and
 * returned by hat_pagesync.
 */
#define P_MOD   0x1             /* the modified bit */
#define P_REF   0x2             /* the referenced bit */
#define P_RO    0x4             /* Read only page */
#define P_NSH   0x8             /* Not to shuffle v_pages */

#define hat_ismod(pp)           (hat_page_getattr(pp, P_MOD))
#define hat_isref(pp)           (hat_page_getattr(pp, P_REF))
#define hat_isro(pp)            (hat_page_getattr(pp, P_RO))

#define hat_setmod(pp)          (hat_page_setattr(pp, P_MOD))
#define hat_setmod_only(pp)     (hat_page_setattr(pp, P_MOD|P_NSH))
#define hat_setref(pp)          (hat_page_setattr(pp, P_REF))
#define hat_setrefmod(pp)       (hat_page_setattr(pp, P_REF|P_MOD))

#define hat_clrmod(pp)          (hat_page_clrattr(pp, P_MOD))
#define hat_clrref(pp)          (hat_page_clrattr(pp, P_REF))
#define hat_clrrefmod(pp)       (hat_page_clrattr(pp, P_REF|P_MOD))

#define hat_page_is_mapped(pp)  (hat_page_getshare(pp))

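/*
 * Usage sketch (illustrative only): a file system syncing a page might
 * consult and reset the hardware-maintained bits through these wrappers,
 * assuming `pp' is suitably locked:
 *
 *      if (hat_ismod(pp)) {
 *              hat_clrrefmod(pp);
 *              ... schedule the page for write-back ...
 *      }
 */
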
/*
 * hat_setup is being used in sparc/os/sundep.c
 */
void    hat_setup(struct hat *, int);

/*
 * Flags for hat_setup
 */
#define HAT_DONTALLOC           0
#define HAT_ALLOC               1
#define HAT_INIT                2

/*
 * Other routines, for statistics
 */
int     hat_startstat(struct as *);
void    hat_getstat(struct as *, caddr_t, size_t, uint_t, char *, int);
void    hat_freestat(struct as *, int);
void    hat_resvstat(size_t, struct as *, caddr_t);

/*
 * Relocation callback routines. Currently only sfmmu HAT supports
 * these.
 */
extern int      hat_add_callback(id_t, caddr_t, uint_t, uint_t, void *,
        pfn_t *, void **);
extern id_t     hat_register_callback(int,
        int (*prehandler)(caddr_t, uint_t, uint_t, void *),
        int (*posthandler)(caddr_t, uint_t, uint_t, void *, pfn_t),
        int (*errhandler)(caddr_t, uint_t, uint_t, void *), int);
extern void     hat_delete_callback(caddr_t, uint_t, void *, uint_t, void *);

/*
 * hat_add_callback()/hat_delete_callback() flags.
 */
#define HAC_NOSLEEP     0x0
#define HAC_SLEEP       0x1
#define HAC_PAGELOCK    0x2

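/*
 * Usage sketch (illustrative only, sfmmu-only interface): a driver that
 * pins kernel memory handed to hardware can be notified around page
 * relocation.  KEY, prerelocate, postrelocate, relocerr, drvarg, `va' and
 * `len' are all assumed to be the driver's own; flag choices are likewise
 * illustrative:
 *
 *      id_t cbid = hat_register_callback(KEY, prerelocate, postrelocate,
 *          relocerr, 0);
 *      ...
 *      (void) hat_add_callback(cbid, va, len, HAC_SLEEP, drvarg,
 *          &pfn, &cookie);
 *      ...
 *      hat_delete_callback(va, len, drvarg, HAC_PAGELOCK, cookie);
 */
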
/*
 * Suspend/unsuspend handler callback arguments.
 */
#define HAT_SUSPEND             0x0010
#define HAT_UNSUSPEND           0x0010
#define HAT_PRESUSPEND          0x0020
#define HAT_POSTUNSUSPEND       0x0020

/*
 * Error handler callback arguments. See the block comments
 * before the implementation of hat_add_callback() for an
 * explanation of what these mean.
 */
#define HAT_CB_ERR_LEAKED       0x1

#endif /* _KERNEL */

/*
 * The size of the bit array for ref and mod bit storage must be a power of 2.
 * 2 bits are collected for each page.  The shift used below is 4, which
 * gives 16 8-bit bytes = 128 bits, i.e. ref and mod bit information
 * for 64 pages.
 */
#define HRM_SHIFT               4
#define HRM_BYTES               (1 << HRM_SHIFT)
#define HRM_PAGES               ((HRM_BYTES * NBBY) / 2)
#define HRM_PGPERBYTE           (NBBY/2)
#define HRM_PGBYTEMASK          (HRM_PGPERBYTE-1)

#define HRM_PGOFFMASK           ((HRM_PGPERBYTE-1) << MMU_PAGESHIFT)
#define HRM_BASEOFFSET          (((MMU_PAGESIZE * HRM_PAGES) - 1))
#define HRM_BASEMASK            (~(HRM_BASEOFFSET))

#define HRM_BASESHIFT           (MMU_PAGESHIFT + (HRM_SHIFT + 2))
#define HRM_PAGEMASK            (MMU_PAGEMASK ^ HRM_BASEMASK)

#define HRM_HASHSIZE            0x200
#define HRM_HASHMASK            (HRM_HASHSIZE - 1)

#define HRM_BLIST_INCR          0x200

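/*
 * Worked example of the sizing above: with HRM_SHIFT == 4,
 * HRM_BYTES == (1 << 4) == 16 bytes per block, giving 16 * NBBY == 128
 * bits; at 2 bits (ref + mod) per page each block therefore covers
 * HRM_PAGES == 64 pages, i.e. 64 * MMU_PAGESIZE bytes of address space
 * per hrmstat block.
 */
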
/*
 * The structure for maintaining referenced and modified information
 */
struct hrmstat {
        struct as       *hrm_as;        /* stat block belongs to this as */
        uintptr_t       hrm_base;       /* base of block */
        ushort_t        hrm_id;         /* opaque identifier, one of a_vbits */
        struct hrmstat  *hrm_anext;     /* as statistics block list */
        struct hrmstat  *hrm_hnext;     /* list for hashed blocks */
        uchar_t         hrm_bits[HRM_BYTES]; /* the ref and mod bits */
};

extern struct hrmstat **hrm_hashtab;

/*
 * For global monitoring of the reference and modified bits
 * of all address spaces we reserve one id bit.
 */
#define HRM_SWSMONID    1


#ifdef _KERNEL

/*
 * Hat locking functions
 * XXX - these two functions are currently being used by hatstats;
 *      they could be removed by using a per-as mutex for hatstats.
 */
void    hat_enter(struct hat *);
void    hat_exit(struct hat *);

typedef void (*hat_rgn_cb_func_t)(caddr_t, caddr_t, caddr_t,
    size_t, void *, u_offset_t);

void                    hat_join_srd(struct hat *, vnode_t *);

hat_region_cookie_t     hat_join_region(struct hat *, caddr_t, size_t, void *,
                            u_offset_t, uchar_t, uchar_t, hat_rgn_cb_func_t,
                            uint_t);
void                    hat_leave_region(struct hat *, hat_region_cookie_t,
                            uint_t);
void                    hat_dup_region(struct hat *, hat_region_cookie_t);

#define HAT_INVALID_REGION_COOKIE       ((hat_region_cookie_t)-1)
#define HAT_IS_REGION_COOKIE_VALID(c)   ((c) != HAT_INVALID_REGION_COOKIE)

/* hat_join_region() flags */

#define HAT_REGION_TEXT 0x1     /* passed by segvn */
#define HAT_REGION_ISM  0x2     /* for hat_share()/hat_unshare() */

#define HAT_REGION_TYPE_MASK    (0x7)

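/*
 * Usage sketch (illustrative only): a segment driver using shared regions
 * keeps the cookie returned by hat_join_region() and passes it to the
 * *_region() variants, falling back to the plain calls when it has no
 * region.  The `svd->rcookie' field and the other arguments are assumed
 * to be the driver's own:
 *
 *      if (HAT_IS_REGION_COOKIE_VALID(svd->rcookie))
 *              hat_memload_region(hat, addr, pp, attr, flags,
 *                  svd->rcookie);
 *      else
 *              hat_memload(hat, addr, pp, attr, flags);
 */
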
#endif /* _KERNEL */

#ifdef  __cplusplus
}
#endif

#endif  /* _VM_HAT_H */