/*
 * CDDL HEADER START
 *
 * The contents of this file are subject to the terms of the
 * Common Development and Distribution License (the "License").
 * You may not use this file except in compliance with the License.
 *
 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
 * or http://www.opensolaris.org/os/licensing.
 * See the License for the specific language governing permissions
 * and limitations under the License.
 *
 * When distributing Covered Code, include this CDDL HEADER in each
 * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
 * If applicable, add the following below this CDDL HEADER, with the
 * fields enclosed by brackets "[]" replaced with your own identifying
 * information: Portions Copyright [yyyy] [name of copyright owner]
 *
 * CDDL HEADER END
 */
/*
 * Copyright (c) 1986, 2010, Oracle and/or its affiliates. All rights reserved.
 * Copyright 2013 Nexenta Systems, Inc. All rights reserved.
 */

/*	Copyright (c) 1984, 1986, 1987, 1988, 1989 AT&T	*/
/*	  All Rights Reserved	*/

/*
 * University Copyright- Copyright (c) 1982, 1986, 1988
 * The Regents of the University of California
 * All Rights Reserved
 *
 * University Acknowledgment- Portions of this document are derived from
 * software developed by the University of California, Berkeley, and its
 * contributors.
 */

#ifndef	_VM_PAGE_H
#define	_VM_PAGE_H

#include <vm/seg.h>

#ifdef	__cplusplus
extern "C" {
#endif

#if defined(_KERNEL) || defined(_KMEMUSER)

/*
 * Shared/Exclusive lock.
 */

/*
 * Types of page locking supported by page_lock & friends.
 */
typedef enum {
	SE_SHARED,
	SE_EXCL			/* exclusive lock (value == -1) */
} se_t;

/*
 * For requesting that page_lock reclaim the page from the free list.
 */
typedef enum {
	P_RECLAIM,		/* reclaim page from free list */
	P_NO_RECLAIM		/* DON'T reclaim the page */
} reclaim_t;

/*
 * Callers of page_try_reclaim_lock and page_lock_es can use this flag
 * to get SE_EXCL access before reader/writers are given access.
 */
#define	SE_EXCL_WANTED	0x02

/*
 * All page_*lock() requests will be denied unless this flag is set in
 * the 'es' parameter.
 */
#define	SE_RETIRED	0x04

#endif	/* _KERNEL | _KMEMUSER */

typedef int	selock_t;

/*
 * Define VM_STATS to turn on all sorts of statistic gathering about
 * the VM layer.  By default, it is only turned on when DEBUG is
 * also defined.
 */
#ifdef DEBUG
#define	VM_STATS
#endif	/* DEBUG */

#ifdef VM_STATS
#define	VM_STAT_ADD(stat)		(stat)++
#define	VM_STAT_COND_ADD(cond, stat)	((void) (!(cond) || (stat)++))
#else
#define	VM_STAT_ADD(stat)
#define	VM_STAT_COND_ADD(cond, stat)
#endif	/* VM_STATS */
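
/*
 * A minimal usage sketch (illustrative only; the counter struct and its
 * field names below are hypothetical, not part of this header).
 * Subsystems typically group counters in a struct and bump them with
 * VM_STAT_ADD() so the increments compile away when VM_STATS is unset:
 *
 *	static struct {
 *		uint_t	lookups;
 *		uint_t	misses;
 *	} mystats;
 *
 *	pp = page_lookup(vp, off, SE_SHARED);
 *	VM_STAT_ADD(mystats.lookups);
 *	VM_STAT_COND_ADD(pp == NULL, mystats.misses);
 */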

#ifdef _KERNEL

/*
 * PAGE_LLOCK_SIZE is 2 * NCPU, but no smaller than 128.
 * PAGE_LLOCK_SHIFT is log2(PAGE_LLOCK_SIZE).
 *
 * We use ? : instead of #if because <vm/page.h> is included everywhere;
 * NCPU_P2 is only a constant in the "unix" module.
 */
#define	PAGE_LLOCK_SHIFT \
	((unsigned)(((2*NCPU_P2) > 128) ? NCPU_LOG2 + 1 : 7))

#define	PAGE_LLOCK_SIZE	(1ul << PAGE_LLOCK_SHIFT)

/*
 * The number of low order 0 (or less variable) bits in the page_t address.
 */
#if defined(__sparc)
#define	PP_SHIFT	7
#else
#define	PP_SHIFT	6
#endif

/*
 * pp may be the root of a large page, and many low order bits will be 0.
 * Shift and XOR multiple times to capture the good bits across the range of
 * possible page sizes.
 */
#define	PAGE_LLOCK_HASH(pp)	\
	(((((uintptr_t)(pp) >> PP_SHIFT) ^ \
	((uintptr_t)(pp) >> (PAGE_LLOCK_SHIFT + PP_SHIFT))) ^ \
	((uintptr_t)(pp) >> ((PAGE_LLOCK_SHIFT * 2) + PP_SHIFT)) ^ \
	((uintptr_t)(pp) >> ((PAGE_LLOCK_SHIFT * 3) + PP_SHIFT))) & \
	(PAGE_LLOCK_SIZE - 1))

#define	page_struct_lock(pp)	\
	mutex_enter(&page_llocks[PAGE_LLOCK_HASH(PP_PAGEROOT(pp))].pad_mutex)
#define	page_struct_unlock(pp)	\
	mutex_exit(&page_llocks[PAGE_LLOCK_HASH(PP_PAGEROOT(pp))].pad_mutex)
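
/*
 * A short sketch of how these are used (illustrative only).  p_lckcnt and
 * p_cowcnt are protected by the hashed page_llocks[] mutex, so updates are
 * bracketed by page_struct_lock()/page_struct_unlock() on the page:
 *
 *	page_struct_lock(pp);
 *	if (pp->p_lckcnt < PAGE_LOCK_MAXIMUM)
 *		pp->p_lckcnt++;
 *	page_struct_unlock(pp);
 *
 * (PAGE_LOCK_MAXIMUM is defined later in this header.)
 */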

#endif	/* _KERNEL */

#include <sys/t_lock.h>

struct as;

/*
 * Each physical page has a page structure, which is used to maintain
 * these pages as a cache.  A page can be found via a hashed lookup
 * based on the [vp, offset].  If a page has a [vp, offset] identity,
 * then it is entered on a doubly linked circular list off the
 * vnode using the vpnext/vpprev pointers.  If the p_free bit
 * is on, then the page is also on a doubly linked circular free
 * list using next/prev pointers.  If the "p_selock" and "p_iolock"
 * are held, then the page is currently being read in (exclusive p_selock)
 * or written back (shared p_selock).  In this case, the next/prev pointers
 * are used to link the pages together for a consecutive i/o request.  If
 * the page is being brought in from its backing store, then other processes
 * will wait for the i/o to complete before attaching to the page since it
 * will have an "exclusive" lock.
 *
 * Each page structure has the locks described below along with
 * the fields they protect:
 *
 *	p_selock	This is a per-page shared/exclusive lock that is
 *			used to implement the logical shared/exclusive
 *			lock for each page.  The "shared" lock is normally
 *			used in most cases while the "exclusive" lock is
 *			required to destroy or retain exclusive access to
 *			a page (e.g., while reading in pages).  The appropriate
 *			lock is always held whenever there is any reference
 *			to a page structure (e.g., during i/o).
 *			(Note that with the addition of the "writer-lock-wanted"
 *			semantics (via SE_EWANTED), threads must not acquire
 *			multiple reader locks or else a deadly embrace will
 *			occur in the following situation: thread 1 obtains a
 *			reader lock; next thread 2 fails to get a writer lock
 *			but specified SE_EWANTED so it will wait by either
 *			blocking (when using page_lock_es) or spinning while
 *			retrying (when using page_try_reclaim_lock) until the
 *			reader lock is released; then thread 1 attempts to
 *			get another reader lock but is denied due to
 *			SE_EWANTED being set, and now both threads are in a
 *			deadly embrace.)
 *
 *				p_hash
 *				p_vnode
 *				p_offset
 *
 *				p_free
 *				p_age
 *
 *	p_iolock	This is a binary semaphore lock that provides
 *			exclusive access to the i/o list links in each
 *			page structure.  It is always held while the page
 *			is on an i/o list (i.e., involved in i/o).  That is,
 *			even though a page may be only `shared' locked
 *			while it is doing a write, the following fields may
 *			change anyway.  Normally, the page must be
 *			`exclusively' locked to change anything in it.
 *
 *				p_next
 *				p_prev
 *
 * The following fields are protected by the global page_llocks[]:
 *
 *		p_lckcnt
 *		p_cowcnt
 *
 * The following lists are protected by the global page_freelock:
 *
 *		page_cachelist
 *		page_freelist
 *
 * The following, for our purposes, are protected by
 * the global freemem_lock:
 *
 *		freemem
 *		freemem_wait
 *		freemem_cv
 *
 * The following fields are protected by hat layer lock(s).  When a page
 * structure is not mapped and is not associated with a vnode (after a call
 * to page_hashout() for example) the p_nrm field may be modified without
 * holding the hat layer lock:
 *
 *		p_nrm
 *		p_mapping
 *		p_share
 *
 * The following field is file system dependent.  How it is used and
 * the locking strategies applied are up to the individual file system
 * implementation.
 *
 *		p_fsdata
 *
 * The page structure is used to represent and control the system's
 * physical pages.  There is one instance of the structure for each
 * page that is not permanently allocated.  For example, the pages that
 * hold the page structures are permanently held by the kernel
 * and hence do not need page structures to track them.  The array
 * of page structures is allocated early on in the kernel's life and
 * is based on the amount of available physical memory.
 *
 * Each page structure may simultaneously appear on several linked lists.
 * The lists are:  hash list, free or in i/o list, and a vnode's page list.
 * Each type of list is protected by a different group of mutexes as described
 * below:
 *
 * The hash list is used to quickly find a page when the page's vnode and
 * offset within the vnode are known.  Each page that is hashed is
 * connected via the `p_hash' field.  The anchor for each hash is in the
 * array `page_hash'.  An array of mutexes, `ph_mutex', protects the
 * lists anchored by page_hash[].  To either search or modify a given hash
 * list, the appropriate mutex in the ph_mutex array must be held.
 *
 * The free list contains pages that are `free to be given away'.  For
 * efficiency reasons, pages on this list are placed in two categories:
 * pages that are still associated with a vnode, and pages that are not
 * associated with a vnode.  Free pages always have their `p_free' bit set;
 * free pages that are still associated with a vnode also have their
 * `p_age' bit set.  Pages on the free list are connected via their
 * `p_next' and `p_prev' fields.  When a page is involved in some sort
 * of i/o, it is not free and these fields may be used to link associated
 * pages together.  At the moment, the free list is protected by a
 * single mutex `page_freelock'.  The list of free pages still associated
 * with a vnode is anchored by `page_cachelist' while other free pages
 * are anchored in architecture dependent ways (to handle page coloring etc.).
 *
 * Pages associated with a given vnode appear on a list anchored in the
 * vnode by the `v_pages' field.  They are linked together with
 * `p_vpnext' and `p_vpprev'.  The field `p_offset' contains a page's
 * offset within the vnode.  The pages on this list are not kept in
 * offset order.  These lists, in a manner similar to the hash lists,
 * are protected by an array of mutexes called `vph_mutex'.  Before
 * searching or modifying this chain the appropriate mutex in the
 * vph_mutex[] array must be held.
 *
 * Again, each of the lists that a page can appear on is protected by a
 * mutex.  Before reading or writing any of the fields comprising the
 * list, the appropriate lock must be held.  These list locks should only
 * be held for very short intervals.
 *
 * In addition to the list locks, each page structure contains a
 * shared/exclusive lock that protects various fields within it.
 * To modify one of these fields, the `p_selock' must be exclusively held.
 * To read a field with a degree of certainty, the lock must be at least
 * held shared.
 *
 * Removing a page structure from one of the lists requires holding
 * the appropriate list lock and the page's p_selock.  A page may be
 * prevented from changing identity, being freed, or otherwise modified
 * by acquiring p_selock shared.
 *
 * To avoid deadlocks, a strict locking protocol must be followed.  Basically
 * there are two cases:  In the first case, the page structure in question
 * is known ahead of time (e.g., when the page is to be added or removed
 * from a list).  In the second case, the page structure is not known and
 * must be found by searching one of the lists.
 *
 * When adding a known page to or removing it from one of the lists, first
 * the page must be exclusively locked (since at least one of its fields
 * will be modified), second the lock protecting the list must be acquired,
 * third the page inserted or deleted, and finally the list lock dropped.
 *
 * The more interesting case occurs when the particular page structure
 * is not known ahead of time.  For example, when a call is made to
 * page_lookup(), it is not known if a page with the desired (vnode and
 * offset pair) identity exists.  So the appropriate mutex in ph_mutex is
 * acquired, the hash list searched, and if the desired page is found
 * an attempt is made to lock it.  The attempt to acquire p_selock must
 * not block while the hash list lock is held.  A deadlock could occur
 * if some other process was trying to remove the page from the list.
 * The removing process (following the above protocol) would have exclusively
 * locked the page, and be spinning waiting to acquire the lock protecting
 * the hash list.  Since the searching process holds the hash list lock
 * and is waiting to acquire the page lock, a deadlock occurs.
 *
 * The proper scheme to follow is:  first, lock the appropriate list,
 * search the list, and if the desired page is found either use
 * page_trylock() (which will not block) or pass the address of the
 * list lock to page_lock().  If page_lock() can not acquire the page's
 * lock, it will drop the list lock before going to sleep.  page_lock()
 * returns a value to indicate if the list lock was dropped allowing the
 * calling program to react appropriately (i.e., retry the operation).
 * A sketch of this pattern follows below.
 *
 * If the list lock was dropped before the attempt at locking the page
 * was made, checks would have to be made to ensure that the page had
 * not changed identity before its lock was obtained.  This is because
 * the interval between dropping the list lock and acquiring the page
 * lock is indeterminate.
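 *
 * A minimal sketch of that lookup pattern (illustrative only; the identity
 * re-check is elided, and page_lock() is assumed to return zero when it
 * had to drop the list lock.  PAGE_HASH_FUNC and PAGE_HASH_MUTEX are
 * defined later in this header):
 *
 *	top:
 *		index = PAGE_HASH_FUNC(vp, off);
 *		phm = PAGE_HASH_MUTEX(index);
 *		mutex_enter(phm);
 *		(walk page_hash[index] via p_hash looking for [vp, off])
 *		if (pp != NULL) {
 *			if (!page_lock(pp, SE_SHARED, phm, P_RECLAIM))
 *				goto top;	(page_lock() dropped phm)
 *		}
 *		mutex_exit(phm);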
 *
 * In addition, when both a hash list lock (ph_mutex[]) and a vnode list
 * lock (vph_mutex[]) are needed, the hash list lock must be acquired first.
 * The routine page_hashin() is a good example of this sequence.
 * This sequence is ASSERTed by checking that the vph_mutex[] is not held
 * just before each acquisition of one of the mutexes in ph_mutex[].
 *
 * So, as a quick summary:
 *
 *	pse_mutex[]'s protect the p_selock and p_cv fields.
 *
 *	p_selock protects the p_free, p_age, p_vnode, p_offset and p_hash
 *	fields.
 *
 *	ph_mutex[]'s protect the page_hash[] array and its chains.
 *
 *	vph_mutex[]'s protect the v_pages field and the vp page chains.
 *
 *	First lock the page, then the hash chain, then the vnode chain.  When
 *	this is not possible `trylocks' must be used.  Sleeping while holding
 *	any of these mutexes (p_selock is not a mutex) is not allowed.
 *
 *
 *	field		reading		writing			ordering
 *	======================================================================
 *	p_vnode		p_selock(E,S)	p_selock(E)
 *	p_offset
 *	p_free
 *	p_age
 *	=====================================================================
 *	p_hash		p_selock(E,S)	p_selock(E) &&		p_selock, ph_mutex
 *					ph_mutex[]
 *	=====================================================================
 *	p_vpnext	p_selock(E,S)	p_selock(E) &&		p_selock, vph_mutex
 *	p_vpprev			vph_mutex[]
 *	=====================================================================
 *	When the p_free bit is set:
 *
 *	p_next		p_selock(E,S)	p_selock(E) &&		p_selock,
 *	p_prev				page_freelock		page_freelock
 *
 *	When the p_free bit is not set:
 *
 *	p_next		p_selock(E,S)	p_selock(E) &&		p_selock, p_iolock
 *	p_prev				p_iolock
 *	=====================================================================
 *	p_selock	pse_mutex[]	pse_mutex[]		can't acquire any
 *	p_cv							other mutexes or
 *								sleep while holding
 *								this lock.
 *	=====================================================================
 *	p_lckcnt	p_selock(E,S)	p_selock(E)
 *						OR
 *					p_selock(S) &&
 *					page_llocks[]
 *	p_cowcnt
 *	=====================================================================
 *	p_nrm		hat layer lock	hat layer lock
 *	p_mapping
 *	p_pagenum
 *	=====================================================================
 *
 *	where:
 *		E----> exclusive version of p_selock.
 *		S----> shared version of p_selock.
 *
 *
 *	Global data structures and variables:
 *
 *	field		reading		writing			ordering
 *	=====================================================================
 *	page_hash[]	ph_mutex[]	ph_mutex[]		can hold this lock
 *								before acquiring
 *								a vph_mutex or
 *								pse_mutex.
 *	=====================================================================
 *	vp->v_pages	vph_mutex[]	vph_mutex[]		can only acquire
 *								a pse_mutex while
 *								holding this lock.
 *	=====================================================================
 *	page_cachelist	page_freelock	page_freelock		can't acquire any
 *	page_freelist	page_freelock	page_freelock		other mutexes while
 *								holding this mutex.
 *	=====================================================================
 *	freemem		freemem_lock	freemem_lock		can't acquire any
 *	freemem_wait					 	other mutexes while
 *	freemem_cv						holding this mutex.
 *	=====================================================================
 *
 * Page relocation, PG_NORELOC and P_NORELOC.
 *
 * Pages may be relocated using the page_relocate() interface.  Relocation
 * involves moving the contents and identity of a page to another, free page.
 * To relocate a page, the SE_EXCL lock must be obtained.  The way to prevent
 * a page from being relocated is to hold the SE_SHARED lock (the SE_EXCL
 * lock must not be held indefinitely).  If the page is going to be held
 * SE_SHARED indefinitely, then the PG_NORELOC hint should be passed
 * to page_create_va so that pages that are prevented from being relocated
 * can be managed differently by the platform specific layer.  A sketch of
 * this usage follows below.
 *
 * Pages locked in memory using page_pp_lock (p_lckcnt/p_cowcnt != 0)
 * are guaranteed to be held in memory, but can still be relocated
 * provided the SE_EXCL lock can be obtained.
 *
 * The P_NORELOC bit in the page_t.p_state field is provided for use by
 * the platform specific code in managing pages when the PG_NORELOC
 * hint is used.
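 *
 * A minimal sketch of the PG_NORELOC usage described above (illustrative
 * only; the vnode, offset and segment arguments are hypothetical):
 *
 *	pp = page_create_va(vp, off, PAGESIZE, PG_WAIT | PG_NORELOC,
 *	    seg, vaddr);
 *	...
 *	page_downgrade(pp);	(hold SE_SHARED from here on)
 *
 * page_create_va() returns the page exclusively locked; downgrading to
 * SE_SHARED lets the page be held indefinitely, and since the page was
 * marked P_NORELOC at creation the platform specific layer can manage
 * it accordingly.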
 *
 * Memory delete and page locking.
 *
 * The set of all usable pages is managed using the global page list as
 * implemented by the memseg structure defined below.  When memory is added
 * or deleted this list changes.  Additions to this list guarantee that the
 * list is never corrupt.  In order to avoid the necessity of an additional
 * lock to protect against failed accesses to the memseg being deleted and,
 * more importantly, the page_ts, the memseg structure is never freed and the
 * page_t virtual address space is remapped to a page (or pages) of
 * zeros.  If a page_t is manipulated while it is p_selock'd, or if it is
 * locked indirectly via a hash or freelist lock, it is not possible for
 * memory delete to collect the page and so that part of the page list is
 * prevented from being deleted.  If the page is referenced outside of one
 * of these locks, it is possible for the page_t being referenced to be
 * deleted.  Examples of this are page_t pointers returned by
 * page_numtopp_nolock, page_first and page_next.  Provided the page_t
 * is re-checked after taking the p_selock (for p_vnode != NULL), the
 * remapping to the zero pages will be detected.  A sketch of this
 * re-check follows below.
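 *
 * A minimal sketch of that re-check (illustrative only; the pfn re-check
 * is an extra belt-and-braces test beyond the p_vnode check):
 *
 *	pp = page_numtopp_nolock(pfn);
 *	if (pp != NULL && page_trylock(pp, SE_SHARED)) {
 *		if (pp->p_vnode == NULL || page_pptonum(pp) != pfn) {
 *			page_unlock(pp);	(deleted or reused)
 *		} else {
 *			...			(safe to use pp here)
 *		}
 *	}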
 *
 *
 * Page size (p_szc field) and page locking.
 *
 * The p_szc field of free pages is changed by the free list manager under
 * the freelist locks and is of no concern to the rest of the VM subsystem.
 *
 * p_szc changes of allocated anonymous (swapfs) pages can only be done
 * after exclusively locking all constituent pages and calling
 * hat_pageunload() on each of them.  To prevent p_szc changes of non-free
 * anonymous (swapfs) large pages it's enough to either lock SHARED any of
 * the constituent pages or to prevent hat_pageunload() by holding the hat
 * level lock that protects mapping lists (this method is for hat code only).
 *
 * To increase (promote) the p_szc of allocated non-anonymous file system
 * pages one has to first lock exclusively all involved constituent pages
 * and call hat_pageunload() on each of them.  To prevent a p_szc promotion
 * it's enough to either lock SHARED any of the constituent pages that will
 * be needed to make a large page or to prevent hat_pageunload() by holding
 * the hat level lock that protects mapping lists (this method is for hat
 * code only).
 *
 * To decrease (demote) the p_szc of an allocated non-anonymous file system
 * large page one can either use the same method as used for changing the
 * p_szc of anonymous large pages or, if it's not possible to lock all
 * constituent pages exclusively, a different method can be used.  In the
 * second method one only has to exclusively lock one of the constituent
 * pages but then one has to acquire further locks by calling
 * page_szc_lock() and hat_page_demote().  hat_page_demote() acquires hat
 * level locks and then demotes the page.  This mechanism relies on the
 * fact that any code that needs to prevent the p_szc of a file system
 * large page from changing either locks all constituent large pages at
 * least SHARED or locks some pages at least SHARED and calls
 * page_szc_lock() or uses hat level page locks.  Demotion using this
 * method is implemented by page_demote_vp_pages().  Please see comments
 * in front of page_demote_vp_pages(), hat_page_demote() and
 * page_szc_lock() for more details.  A sketch of the second method
 * follows this comment block.
 *
 * Lock order: p_selock, page_szc_lock, ph_mutex/vph_mutex/freelist,
 * hat level locks.
 */
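
/*
 * A minimal sketch of the second (partial-lock) demotion method described
 * above (illustrative only; hat_page_demote() is a hat layer entry point
 * declared elsewhere and is assumed here to take the page pointer):
 *
 *	if (page_trylock(pp, SE_EXCL)) {
 *		kmutex_t *szcm = page_szc_lock(pp);
 *		if (szcm != NULL) {
 *			hat_page_demote(pp);
 *			mutex_exit(szcm);
 *		}
 *		page_unlock(pp);
 *	}
 */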

typedef struct page {
	u_offset_t	p_offset;	/* offset into vnode for this page */
	struct vnode	*p_vnode;	/* vnode that this page is named by */
	selock_t	p_selock;	/* shared/exclusive lock on the page */
#if defined(_LP64)
	uint_t		p_vpmref;	/* vpm ref - index of the vpmap_t */
#endif
	struct page	*p_hash;	/* hash by [vnode, offset] */
	struct page	*p_vpnext;	/* next page in vnode list */
	struct page	*p_vpprev;	/* prev page in vnode list */
	struct page	*p_next;	/* next page in free/intrans lists */
	struct page	*p_prev;	/* prev page in free/intrans lists */
	ushort_t	p_lckcnt;	/* number of locks on page data */
	ushort_t	p_cowcnt;	/* number of copy on write locks */
	kcondvar_t	p_cv;		/* page struct's condition var */
	kcondvar_t	p_io_cv;	/* for iolock */
	uchar_t		p_iolock_state;	/* replaces p_iolock */
	volatile uchar_t p_szc;		/* page size code */
	uchar_t		p_fsdata;	/* file system dependent byte */
	uchar_t		p_state;	/* p_free, p_noreloc */
	uchar_t		p_nrm;		/* non-cache, ref, mod readonly bits */
#if defined(__sparc)
	uchar_t		p_vcolor;	/* virtual color */
#else
	uchar_t		p_embed;	/* x86 - changes p_mapping & p_index */
#endif
	uchar_t		p_index;	/* MPSS mapping info. Not used on x86 */
	uchar_t		p_toxic;	/* page has an unrecoverable error */
	void		*p_mapping;	/* hat specific translation info */
	pfn_t		p_pagenum;	/* physical page number */

	uint_t		p_share;	/* number of translations */
#if defined(_LP64)
	uint_t		p_sharepad;	/* pad for growing p_share */
#endif
	uint_t		p_slckcnt;	/* number of softlocks */
#if defined(__sparc)
	uint_t		p_kpmref;	/* number of kpm mapping sharers */
	struct kpme	*p_kpmelist;	/* kpm specific mapping info */
#else
	/* index of entry in p_map when p_embed is set */
	uint_t		p_mlentry;
#endif
#if defined(_LP64)
	kmutex_t	p_ilock;	/* protects p_vpmref */
#else
	uint64_t	p_msresv_2;	/* page allocation debugging */
#endif
} page_t;


typedef	page_t	devpage_t;
#define	devpage	page

#define	PAGE_LOCK_MAXIMUM \
	((1 << (sizeof (((page_t *)0)->p_lckcnt) * NBBY)) - 1)

#define	PAGE_SLOCK_MAXIMUM	UINT_MAX

/*
 * Page hash table is a power-of-two in size, externally chained
 * through the hash field.  PAGE_HASHAVELEN is the average length
 * desired for this chain, from which the size of the page_hash
 * table is derived at boot time and stored in the kernel variable
 * page_hashsz.  In the hash function it is given by PAGE_HASHSZ.
 *
 * PAGE_HASH_FUNC returns an index into the page_hash[] array.  This
 * index is also used to derive the mutex that protects the chain.
 *
 * In constructing the hash function, first we dispose of unimportant bits
 * (page offset from "off" and the low 3 bits of "vp" which are zero for
 * struct alignment).  Then shift and sum the remaining bits a couple of
 * times in order to get as many source bits as possible from the two
 * source values into the resulting hashed value.  Note that this will
 * perform quickly, since the shifting/summing are fast register to
 * register operations with no additional memory references.
 *
 * PH_SHIFT_SIZE is the amount to use for the successive shifts in the hash
 * function below.  The actual value is LOG2(PH_TABLE_SIZE), so that as many
 * bits as possible will filter thru PAGE_HASH_FUNC() and PAGE_HASH_MUTEX().
 *
 * We use ? : instead of #if because <vm/page.h> is included everywhere;
 * NCPU maps to a global variable outside of the "unix" module.
 */
#if defined(_LP64)
#define	PH_SHIFT_SIZE	((NCPU < 4) ? 7 : (NCPU_LOG2 + 1))
#else	/* 32 bits */
#define	PH_SHIFT_SIZE	((NCPU < 4) ? 4 : 7)
#endif	/* _LP64 */

#define	PH_TABLE_SIZE	(1ul << PH_SHIFT_SIZE)

/*
 * We take care to get as much randomness as possible from both the vp and
 * the offset.  Workloads can have few vnodes with many offsets, many vnodes
 * with few offsets or a moderate mix of both.  This hash should perform
 * equally well for each of these possibilities and for all types of memory
 * allocations.
 *
 * vnodes representing files are created over a long period of time and
 * have good variation in the upper vp bits, and the right shifts below
 * capture these bits.  However, swap vnodes are created quickly in a
 * narrow vp* range.  Refer to comments at swap_alloc: vnum has exactly
 * AN_VPSHIFT bits, so the kmem_alloc'd vnode addresses have approximately
 * AN_VPSHIFT bits of variation above their VNODE_ALIGN low order 0 bits.
 * Spread swap vnodes widely in the hash table by XOR'ing a term with the
 * vp bits of variation left shifted to the top of the range.
 */

#define	PAGE_HASHSZ	page_hashsz
#define	PAGE_HASHAVELEN	4
#define	PAGE_HASH_FUNC(vp, off) \
	(((((uintptr_t)(off) >> PAGESHIFT) ^ \
	((uintptr_t)(off) >> (PAGESHIFT + PH_SHIFT_SIZE))) ^ \
	(((uintptr_t)(vp) >> 3) ^ \
	((uintptr_t)(vp) >> (3 + PH_SHIFT_SIZE)) ^ \
	((uintptr_t)(vp) >> (3 + 2 * PH_SHIFT_SIZE)) ^ \
	((uintptr_t)(vp) << \
	(page_hashsz_shift - AN_VPSHIFT - VNODE_ALIGN_LOG2)))) & \
	(PAGE_HASHSZ - 1))

#ifdef _KERNEL

/*
 * The page hash value is re-hashed to an index for the ph_mutex array.
 *
 * For 64 bit kernels, the mutex array is padded out to prevent false
 * sharing of cache sub-blocks (64 bytes) of adjacent mutexes.
 *
 * For 32 bit kernels, we don't want to waste kernel address space with
 * padding, so instead we rely on the hash function to introduce skew of
 * adjacent vnode/offset indexes (the left shift part of the hash function).
 * Since sizeof (kmutex_t) is 8, we shift an additional 3 to skew to a
 * different 64 byte sub-block.
 */
extern pad_mutex_t	ph_mutex[];

#define	PAGE_HASH_MUTEX(x) \
	&(ph_mutex[((x) ^ ((x) >> PH_SHIFT_SIZE) + ((x) << 3)) & \
		(PH_TABLE_SIZE - 1)].pad_mutex)
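
/*
 * Putting the two macros together (illustrative sketch only; real callers
 * such as page_lookup() also handle lock ordering and identity re-checks):
 *
 *	ulong_t index = PAGE_HASH_FUNC(vp, off);
 *	kmutex_t *phm = PAGE_HASH_MUTEX(index);
 *	page_t *pp;
 *
 *	mutex_enter(phm);
 *	for (pp = page_hash[index]; pp != NULL; pp = pp->p_hash) {
 *		if (pp->p_vnode == vp && pp->p_offset == off)
 *			break;
 *	}
 *	mutex_exit(phm);
 */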

/*
 * Flags used while creating pages.
 */
#define	PG_EXCL		0x0001
#define	PG_WAIT		0x0002	/* Blocking memory allocations */
#define	PG_PHYSCONTIG	0x0004	/* NOT SUPPORTED */
#define	PG_MATCH_COLOR	0x0008	/* SUPPORTED by free list routines */
#define	PG_NORELOC	0x0010	/* Non-relocatable alloc hint. */
				/* Page must be PP_ISNORELOC */
#define	PG_PANIC	0x0020	/* system will panic if alloc fails */
#define	PG_PUSHPAGE	0x0040	/* alloc may use reserve */
#define	PG_LOCAL	0x0080	/* alloc from given lgrp only */
#define	PG_NORMALPRI	0x0100	/* PG_WAIT like priority, but */
				/* non-blocking */
/*
 * When p_selock has the SE_EWANTED bit set, threads waiting for SE_EXCL
 * access are given priority over all other waiting threads.
 */
#define	SE_EWANTED	0x40000000
#define	PAGE_LOCKED(pp)		(((pp)->p_selock & ~SE_EWANTED) != 0)
#define	PAGE_SHARED(pp)		(((pp)->p_selock & ~SE_EWANTED) > 0)
#define	PAGE_EXCL(pp)		((pp)->p_selock < 0)
#define	PAGE_LOCKED_SE(pp, se)	\
	((se) == SE_EXCL ? PAGE_EXCL(pp) : PAGE_SHARED(pp))

extern	long page_hashsz;
extern	unsigned int page_hashsz_shift;
extern	page_t **page_hash;

extern	pad_mutex_t page_llocks[];	/* page logical lock mutex */
extern	kmutex_t freemem_lock;		/* freemem lock */

extern	pgcnt_t	total_pages;		/* total pages in the system */

/*
 * Variables controlling locking of physical memory.
 */
extern	volatile pgcnt_t pages_pp_maximum;	/* tuning: lock + claim <= max */
extern	void init_pages_pp_maximum(void);

struct lgrp;

/* page_list_{add,sub} flags */

/* which list */
#define	PG_FREE_LIST	0x0001
#define	PG_CACHE_LIST	0x0002

/* where on list */
#define	PG_LIST_TAIL	0x0010
#define	PG_LIST_HEAD	0x0020

/* called from */
#define	PG_LIST_ISINIT	0x1000

/*
 * Page frame operations.
 */
page_t	*page_lookup(struct vnode *, u_offset_t, se_t);
page_t	*page_lookup_create(struct vnode *, u_offset_t, se_t, page_t *,
	spgcnt_t *, int);
page_t	*page_lookup_nowait(struct vnode *, u_offset_t, se_t);
page_t	*page_find(struct vnode *, u_offset_t);
page_t	*page_exists(struct vnode *, u_offset_t);
int	page_exists_physcontig(vnode_t *, u_offset_t, uint_t, page_t *[]);
int	page_exists_forreal(struct vnode *, u_offset_t, uint_t *);
void	page_needfree(spgcnt_t);
page_t	*page_create(struct vnode *, u_offset_t, size_t, uint_t);
int	page_alloc_pages(struct vnode *, struct seg *, caddr_t, page_t **,
	page_t **, uint_t, int, int);
page_t	*page_create_va_large(vnode_t *vp, u_offset_t off, size_t bytes,
	uint_t flags, struct seg *seg, caddr_t vaddr, void *arg);
page_t	*page_create_va(struct vnode *, u_offset_t, size_t, uint_t,
	struct seg *, caddr_t);
int	page_create_wait(pgcnt_t npages, uint_t flags);
void	page_create_putback(spgcnt_t npages);
void	page_free(page_t *, int);
void	page_free_at_startup(page_t *);
void	page_free_pages(page_t *);
void	free_vp_pages(struct vnode *, u_offset_t, size_t);
int	page_reclaim(page_t *, kmutex_t *);
int	page_reclaim_pages(page_t *, kmutex_t *, uint_t);
void	page_destroy(page_t *, int);
void	page_destroy_pages(page_t *);
void	page_destroy_free(page_t *);
void	page_rename(page_t *, struct vnode *, u_offset_t);
int	page_hashin(page_t *, struct vnode *, u_offset_t, kmutex_t *);
void	page_hashout(page_t *, kmutex_t *);
int	page_num_hashin(pfn_t, struct vnode *, u_offset_t);
void	page_add(page_t **, page_t *);
void	page_add_common(page_t **, page_t *);
void	page_sub(page_t **, page_t *);
void	page_sub_common(page_t **, page_t *);
page_t	*page_get_freelist(struct vnode *, u_offset_t, struct seg *,
	caddr_t, size_t, uint_t, struct lgrp *);

page_t	*page_get_cachelist(struct vnode *, u_offset_t, struct seg *,
	caddr_t, uint_t, struct lgrp *);
#if defined(__i386) || defined(__amd64)
int	page_chk_freelist(uint_t);
#endif
void	page_list_add(page_t *, int);
void	page_boot_demote(page_t *);
void	page_promote_size(page_t *, uint_t);
void	page_list_add_pages(page_t *, int);
void	page_list_sub(page_t *, int);
void	page_list_sub_pages(page_t *, uint_t);
void	page_list_xfer(page_t *, int, int);
void	page_list_break(page_t **, page_t **, size_t);
void	page_list_concat(page_t **, page_t **);
void	page_vpadd(page_t **, page_t *);
void	page_vpsub(page_t **, page_t *);
int	page_lock(page_t *, se_t, kmutex_t *, reclaim_t);
int	page_lock_es(page_t *, se_t, kmutex_t *, reclaim_t, int);
void	page_lock_clr_exclwanted(page_t *);
int	page_trylock(page_t *, se_t);
int	page_try_reclaim_lock(page_t *, se_t, int);
int	page_tryupgrade(page_t *);
void	page_downgrade(page_t *);
void	page_unlock(page_t *);
void	page_unlock_nocapture(page_t *);
void	page_lock_delete(page_t *);
int	page_deleted(page_t *);
int	page_pp_lock(page_t *, int, int);
void	page_pp_unlock(page_t *, int, int);
int	page_resv(pgcnt_t, uint_t);
void	page_unresv(pgcnt_t);
void	page_pp_useclaim(page_t *, page_t *, uint_t);
int	page_addclaim(page_t *);
int	page_subclaim(page_t *);
int	page_addclaim_pages(page_t **);
int	page_subclaim_pages(page_t **);
pfn_t	page_pptonum(page_t *);
page_t	*page_numtopp(pfn_t, se_t);
page_t	*page_numtopp_noreclaim(pfn_t, se_t);
page_t	*page_numtopp_nolock(pfn_t);
page_t	*page_numtopp_nowait(pfn_t, se_t);
page_t	*page_first();
page_t	*page_next(page_t *);
page_t	*page_list_next(page_t *);
page_t	*page_nextn(page_t *, ulong_t);
page_t	*page_next_scan_init(void **);
page_t	*page_next_scan_large(page_t *, ulong_t *, void **);
void	prefetch_page_r(void *);
int	ppcopy(page_t *, page_t *);
void	page_relocate_hash(page_t *, page_t *);
void	pagezero(page_t *, uint_t, uint_t);
void	pagescrub(page_t *, uint_t, uint_t);
void	page_io_lock(page_t *);
void	page_io_unlock(page_t *);
int	page_io_trylock(page_t *);
int	page_iolock_assert(page_t *);
void	page_iolock_init(page_t *);
void	page_io_wait(page_t *);
int	page_io_locked(page_t *);
pgcnt_t	page_busy(int);
void	page_lock_init(void);
ulong_t	page_share_cnt(page_t *);
int	page_isshared(page_t *);
int	page_isfree(page_t *);
int	page_isref(page_t *);
int	page_ismod(page_t *);
int	page_release(page_t *, int);
void	page_retire_init(void);
int	page_retire(uint64_t, uchar_t);
int	page_retire_check(uint64_t, uint64_t *);
int	page_unretire(uint64_t);
int	page_unretire_pp(page_t *, int);
void	page_tryretire(page_t *);
void	page_retire_mdboot();
uint64_t	page_retire_pend_count(void);
uint64_t	page_retire_pend_kas_count(void);
void	page_retire_incr_pend_count(void *);
void	page_retire_decr_pend_count(void *);
void	page_clrtoxic(page_t *, uchar_t);
void	page_settoxic(page_t *, uchar_t);

int	page_reclaim_mem(pgcnt_t, pgcnt_t, int);

void	page_set_props(page_t *, uint_t);
void	page_clr_all_props(page_t *);
int	page_clear_lck_cow(page_t *, int);

kmutex_t	*page_vnode_mutex(struct vnode *);
kmutex_t	*page_se_mutex(struct page *);
kmutex_t	*page_szc_lock(struct page *);
int		page_szc_lock_assert(struct page *pp);

/*
 * Page relocation interfaces.  page_relocate() is generic.
 * page_get_replacement_page() is provided by the PSM.
 * page_free_replacement_page() is generic.
 */
int group_page_trylock(page_t *, se_t);
void group_page_unlock(page_t *);
int page_relocate(page_t **, page_t **, int, int, spgcnt_t *, struct lgrp *);
int do_page_relocate(page_t **, page_t **, int, spgcnt_t *, struct lgrp *);
page_t *page_get_replacement_page(page_t *, struct lgrp *, uint_t);
void page_free_replacement_page(page_t *);
int page_relocate_cage(page_t **, page_t **);

int page_try_demote_pages(page_t *);
int page_try_demote_free_pages(page_t *);
void page_demote_free_pages(page_t *);

struct anon_map;

void page_mark_migrate(struct seg *, caddr_t, size_t, struct anon_map *,
    ulong_t, vnode_t *, u_offset_t, int);
void page_migrate(struct seg *, caddr_t, page_t **, pgcnt_t);

/*
 * Tell the PIM we are adding physical memory
 */
void add_physmem(page_t *, size_t, pfn_t);
void add_physmem_cb(page_t *, pfn_t);	/* callback for page_t part */

/*
 * hw_page_array[] is configured with hardware supported page sizes by
 * platform specific code.
 */
typedef struct {
	size_t	hp_size;
	uint_t	hp_shift;
	uint_t	hp_colors;
	pgcnt_t	hp_pgcnt;	/* base pagesize cnt */
} hw_pagesize_t;

extern hw_pagesize_t	hw_page_array[];
extern uint_t		page_coloring_shift;
extern uint_t		page_colors_mask;
extern int		cpu_page_colors;
extern uint_t		colorequiv;
extern uchar_t		colorequivszc[];

uint_t	page_num_pagesizes(void);
uint_t	page_num_user_pagesizes(int);
size_t	page_get_pagesize(uint_t);
size_t	page_get_user_pagesize(uint_t n);
pgcnt_t	page_get_pagecnt(uint_t);
uint_t	page_get_shift(uint_t);
int	page_szc(size_t);
int	page_szc_user_filtered(size_t);

/* page_get_replacement page flags */
#define	PGR_SAMESZC	0x1	/* only look for page size same as orig */
#define	PGR_NORELOC	0x2	/* allocate a P_NORELOC page */

/*
 * macros for "masked arithmetic"
 * The purpose is to step through all combinations of a set of bits while
 * keeping some other bits fixed.  Fixed bits need not be contiguous.  The
 * variable bits need not be contiguous either, or even right aligned.  The
 * trick is to set all fixed bits to 1, then increment, then restore the
 * fixed bits.  If incrementing causes a carry from a low bit position, the
 * carry propagates thru the fixed bits, because they are temporarily set to 1.
 *
 *	v is the value
 *	i is the increment
 *	eq_mask defines the fixed bits
 *	mask limits the size of the result
 */
#define	ADD_MASKED(v, i, eq_mask, mask) \
	(((((v) | (eq_mask)) + (i)) & (mask) & ~(eq_mask)) | ((v) & (eq_mask)))

/*
 * convenience macro which increments by 1
 */
#define	INC_MASKED(v, eq_mask, mask)	ADD_MASKED(v, 1, eq_mask, mask)
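
/*
 * A worked example of the masked arithmetic above (illustrative only).
 * With eq_mask = 0x4 (bit 2 held fixed) and mask = 0xf, stepping v = 0x7
 * by 1 proceeds as:
 *
 *	(0x7 | 0x4) = 0x7;  0x7 + 1 = 0x8;  0x8 & 0xf & ~0x4 = 0x8;
 *	0x8 | (0x7 & 0x4) = 0xc
 *
 * i.e. INC_MASKED(0x7, 0x4, 0xf) == 0xc: the variable bits (3, 1, 0)
 * advanced from 011 to 100, the carry propagated across the fixed bit,
 * and bit 2 kept its original value.  This is used, e.g., to step through
 * page colors that are equivalent under a fixed subset of color bits.
 */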

#endif	/* _KERNEL */

/*
 * Constants used for the p_iolock_state
 */
#define	PAGE_IO_INUSE	0x1
#define	PAGE_IO_WANTED	0x2

/*
 * Constants used for page_release status
 */
#define	PGREL_NOTREL	0x1
#define	PGREL_CLEAN	0x2
#define	PGREL_MOD	0x3

/*
 * The p_state field holds what used to be the p_age and p_free
 * bits.  These fields are protected by p_selock (see above).
 */
#define	P_FREE		0x80	/* Page on free list */
#define	P_NORELOC	0x40	/* Page is non-relocatable */
#define	P_MIGRATE	0x20	/* Migrate page on next touch */
#define	P_SWAP		0x10	/* belongs to vnode that is V_ISSWAP */
#define	P_BOOTPAGES	0x08	/* member of bootpages list */
#define	P_RAF		0x04	/* page retired at free */

#define	PP_ISFREE(pp)		((pp)->p_state & P_FREE)
#define	PP_ISAGED(pp)		(((pp)->p_state & P_FREE) && \
					((pp)->p_vnode == NULL))
#define	PP_ISNORELOC(pp)	((pp)->p_state & P_NORELOC)
#define	PP_ISKAS(pp)		(VN_ISKAS((pp)->p_vnode))
#define	PP_ISNORELOCKERNEL(pp)	(PP_ISNORELOC(pp) && PP_ISKAS(pp))
#define	PP_ISMIGRATE(pp)	((pp)->p_state & P_MIGRATE)
#define	PP_ISSWAP(pp)		((pp)->p_state & P_SWAP)
#define	PP_ISBOOTPAGES(pp)	((pp)->p_state & P_BOOTPAGES)
#define	PP_ISRAF(pp)		((pp)->p_state & P_RAF)

#define	PP_SETFREE(pp)		((pp)->p_state = ((pp)->p_state & ~P_MIGRATE) \
				| P_FREE)
#define	PP_SETAGED(pp)		ASSERT(PP_ISAGED(pp))
#define	PP_SETNORELOC(pp)	((pp)->p_state |= P_NORELOC)
#define	PP_SETMIGRATE(pp)	((pp)->p_state |= P_MIGRATE)
#define	PP_SETSWAP(pp)		((pp)->p_state |= P_SWAP)
#define	PP_SETBOOTPAGES(pp)	((pp)->p_state |= P_BOOTPAGES)
#define	PP_SETRAF(pp)		((pp)->p_state |= P_RAF)

#define	PP_CLRFREE(pp)		((pp)->p_state &= ~P_FREE)
#define	PP_CLRAGED(pp)		ASSERT(!PP_ISAGED(pp))
#define	PP_CLRNORELOC(pp)	((pp)->p_state &= ~P_NORELOC)
#define	PP_CLRMIGRATE(pp)	((pp)->p_state &= ~P_MIGRATE)
#define	PP_CLRSWAP(pp)		((pp)->p_state &= ~P_SWAP)
#define	PP_CLRBOOTPAGES(pp)	((pp)->p_state &= ~P_BOOTPAGES)
#define	PP_CLRRAF(pp)		((pp)->p_state &= ~P_RAF)

/*
 * Flags for page_t p_toxic, for tracking memory hardware errors.
 *
 * These flags are OR'ed into p_toxic with page_settoxic() to track which
 * error(s) have occurred on a given page.  The flags are cleared with
 * page_clrtoxic().  Both page_settoxic() and page_clrtoxic() use atomic
 * primitives to manipulate the p_toxic field so no other locking is needed.
 *
 * When an error occurs on a page, p_toxic is set to record the error.  The
 * error could be a memory error or something else (e.g., a datapath error).
 * The Page Retire mechanism does not try to determine the exact cause of
 * the error; Page Retire rightly leaves that sort of determination to FMA's
 * Diagnostic Engine (DE).
 *
 * Note that, while p_toxic bits can be set without holding any locks, they
 * should only be cleared while holding the page exclusively locked.
 * There is one exception to this: the PR_CAPTURE bit is protected by a mutex
 * within the page capture logic and thus to set or clear the bit, that mutex
 * needs to be held.  The page does not need to be locked but the page_clrtoxic
 * function must be used as we need an atomic operation.
 * Also note that there is what amounts to a hack to prevent recursion with
 * large pages such that if we are unlocking a page and the PR_CAPTURE bit is
 * set, we will only try to capture the page if the current thread's
 * T_CAPTURING flag is not set.  If the flag is set, the unlock will not try
 * to capture the page even though the PR_CAPTURE bit is set.
 *
 * Pages with PR_UE or PR_FMA flags are retired unconditionally, while pages
 * with PR_MCE are retired if the system has not retired too many of them.
 *
 * A page must be exclusively locked to be retired.  Pages can be retired if
 * they are mapped, modified, or both, as long as they are not marked PR_UE,
 * since pages with uncorrectable errors cannot be relocated in memory.
 * Once a page has been successfully retired it is zeroed, attached to the
 * retired_pages vnode and, finally, PR_RETIRED is set in p_toxic.  The other
 * p_toxic bits are NOT cleared.  Pages are not left locked after retiring
 * them to avoid special case code throughout the kernel; rather, page_*lock()
 * will fail to lock the page, unless SE_RETIRED is passed as an argument.
 *
 * While we have your attention, go take a look at the comments at the
 * beginning of page_retire.c too.
 */
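
/*
 * A minimal sketch of how a caller typically feeds this machinery
 * (illustrative only; `pa' is a hypothetical physical address, and real
 * callers check the errno-style return value):
 *
 *	if (page_retire(pa, PR_FMA) == 0) {
 *		(page is now zeroed, on retired_pages, marked PR_RETIRED)
 *	}
 *
 * To operate on a retired page afterwards, pass SE_RETIRED in the `es'
 * argument of page_lock_es() or page_try_reclaim_lock().
 */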
#define	PR_OK		0x00	/* no problem */
#define	PR_MCE		0x01	/* page has seen two or more CEs */
#define	PR_UE		0x02	/* page has an unhandled UE */
#define	PR_UE_SCRUBBED	0x04	/* page has seen a UE but was cleaned */
#define	PR_FMA		0x08	/* A DE wants this page retired */
#define	PR_CAPTURE	0x10	/* page is hashed on page_capture_hash[] */
#define	PR_RESV		0x20	/* Reserved for future use */
#define	PR_MSG		0x40	/* message(s) already printed for this page */
#define	PR_RETIRED	0x80	/* This page has been retired */

#define	PR_REASONS	(PR_UE | PR_MCE | PR_FMA)
#define	PR_TOXIC	(PR_UE)
#define	PR_ERRMASK	(PR_UE | PR_UE_SCRUBBED | PR_MCE | PR_FMA)
#define	PR_TOXICFLAGS	(0xCF)

#define	PP_RETIRED(pp)	((pp)->p_toxic & PR_RETIRED)
#define	PP_TOXIC(pp)	((pp)->p_toxic & PR_TOXIC)
#define	PP_PR_REQ(pp)	(((pp)->p_toxic & PR_REASONS) && !PP_RETIRED(pp))
#define	PP_PR_NOSHARE(pp)						\
	((((pp)->p_toxic & (PR_RETIRED | PR_FMA | PR_UE)) == PR_FMA) &&	\
	!PP_ISKAS(pp))

/*
 * Flags for page_unretire_pp
 */
#define	PR_UNR_FREE	0x1
#define	PR_UNR_CLEAN	0x2
#define	PR_UNR_TEMP	0x4

/*
 * kpm large page description.
 * The virtual address range of segkpm is divided into chunks of
 * kpm_pgsz.  Each chunk is controlled by a kpm_page_t.  The ushort
 * is sufficient for 2^15 * PAGESIZE, so e.g. the maximum kpm_pgsz
 * is 256M for 8K pages and 2G for 64K pages.  It is kept as small as
 * possible to save physical memory space.
 *
 * There are 2 segkpm mapping windows within the virtual address
 * space when we have to prevent VAC alias conflicts.  The so called
 * Alias window (mappings are always by PAGESIZE) is controlled by
 * kp_refcnta.  The regular window is controlled by kp_refcnt for the
 * normal operation, which is to use the largest available pagesize.
 * When VAC alias conflicts are present within a chunk in the regular
 * window the large page mapping is broken up into smaller PAGESIZE
 * mappings.  kp_refcntc is used to control the pages that are involved
 * in the conflict and kp_refcnts holds the active mappings done
 * with the small page size.  In non-VAC conflict mode kp_refcntc is
 * also used as a "go" indication (-1) for the trap level tsbmiss
 * handler.
 */
typedef struct kpm_page {
	short kp_refcnt;	/* pages mapped large */
	short kp_refcnta;	/* pages mapped in Alias window */
	short kp_refcntc;	/* TL-tsbmiss flag; #vac alias conflict pages */
	short kp_refcnts;	/* vac alias: pages mapped small */
} kpm_page_t;

/*
 * Note: khl_lock offset changes must be reflected in sfmmu_asm.s
 */
typedef struct kpm_hlk {
	kmutex_t khl_mutex;	/* kpm_page mutex */
	uint_t khl_lock;	/* trap level tsbmiss handling */
} kpm_hlk_t;

/*
 * kpm small page description.
 * When kpm_pgsz is equal to PAGESIZE a smaller representation is used
 * to save memory space.  Alias range mappings and regular segkpm
 * mappings are done in units of PAGESIZE and can share the mapping
 * information and the mappings are always distinguishable by their
 * virtual address.  Other information needed for VAC conflict prevention
 * is already available on a per page basis.
 *
 * The state about how a kpm page is mapped and whether it is ready to go
 * is indicated by the following 1 byte kpm_spage structure.  This byte is
 * split into two 4-bit parts - kp_mapped and kp_mapped_go.
 *	- kp_mapped == 1	the page is mapped cacheable
 *	- kp_mapped == 2	the page is mapped non-cacheable
 *	- kp_mapped_go == 1	the mapping is ready to be dropped in
 *	- kp_mapped_go == 0	the mapping is not ready to be dropped in.
 * When kp_mapped_go == 0, we will have the C handler resolve the VAC
 * conflict.  Otherwise, the assembly tsb miss handler can simply drop in
 * the mapping when a tsb miss occurs.
 */
typedef union kpm_spage {
	struct {
#ifdef	_BIG_ENDIAN
		uchar_t mapped_go: 4;	/* go or nogo flag */
		uchar_t mapped: 4;	/* page mapped small */
#else
		uchar_t mapped: 4;	/* page mapped small */
		uchar_t mapped_go: 4;	/* go or nogo flag */
#endif
	} kpm_spage_un;
	uchar_t kp_mapped_flag;
} kpm_spage_t;
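
/*
 * A minimal sketch of how the union is typically consulted (illustrative
 * only; `ksp' is a hypothetical pointer into a memseg's kpm_spages array):
 *
 *	if (ksp->kp_mapped_flag == 0)
 *		(page not mapped in segkpm at all)
 *	else if (ksp->kp_mapped == 1 && ksp->kp_mapped_go == 1)
 *		(mapped cacheable; tsbmiss handler may drop the mapping in)
 *
 * The kp_mapped/kp_mapped_go names resolve through the #defines that
 * follow.
 */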

#define	kp_mapped	kpm_spage_un.mapped
#define	kp_mapped_go	kpm_spage_un.mapped_go

/*
 * Note: kshl_lock offset changes must be reflected in sfmmu_asm.s
 */
typedef struct kpm_shlk {
	uint_t kshl_lock;	/* trap level tsbmiss handling */
} kpm_shlk_t;

/*
 * Each segment of physical memory is described by a memseg struct.
 * Within a segment, memory is considered contiguous.  The members
 * can be categorized as follows:
 * . Platform independent:
 *	pages, epages, pages_base, pages_end, next, lnext.
 * . 64bit only but platform independent:
 *	kpm_pbase, kpm_nkpmpgs, kpm_pages, kpm_spages.
 * . Really platform or mmu specific:
 *	pagespa, epagespa, nextpa, kpm_pagespa.
 * . Mixed:
 *	msegflags.
 */
struct memseg {
	page_t *pages, *epages;		/* [from, to] in page array */
	pfn_t pages_base, pages_end;	/* [from, to] in page numbers */
	struct memseg *next;		/* next segment in list */
	struct memseg *lnext;		/* next segment in deleted list */
#if defined(__sparc)
	uint64_t pagespa, epagespa;	/* [from, to] page array physical */
	uint64_t nextpa;		/* physical next pointer */
	pfn_t	kpm_pbase;		/* start of kpm range */
	pgcnt_t kpm_nkpmpgs;		/* # of kpm_pgsz pages */
	union _mseg_un {
		kpm_page_t *kpm_lpgs;	/* ptr to kpm_page array */
		kpm_spage_t *kpm_spgs;	/* ptr to kpm_spage array */
	} mseg_un;
	uint64_t kpm_pagespa;		/* physical ptr to kpm (s)pages array */
#endif	/* __sparc */
	uint_t msegflags;		/* memseg flags */
};

/* memseg union aliases */
#define	kpm_pages	mseg_un.kpm_lpgs
#define	kpm_spages	mseg_un.kpm_spgs

/* msegflags */
#define	MEMSEG_DYNAMIC		0x1	/* DR: memory was added dynamically */
#define	MEMSEG_META_INCL	0x2	/* DR: memseg includes its metadata */
#define	MEMSEG_META_ALLOC	0x4	/* DR: memseg allocated its metadata */

/* memseg support macros */
#define	MSEG_NPAGES(SEG)	((SEG)->pages_end - (SEG)->pages_base)

/* memseg hash */
#define	MEM_HASH_SHIFT	0x9
#define	N_MEM_SLOTS	0x200		/* must be a power of 2 */
#define	MEMSEG_PFN_HASH(pfn)	(((pfn)/mhash_per_slot) & (N_MEM_SLOTS - 1))

/* memseg externals */
extern struct memseg	*memsegs;	/* list of memory segments */
extern ulong_t		mhash_per_slot;
extern uint64_t		memsegspa;	/* memsegs as physical address */

void build_pfn_hash();
extern struct memseg *page_numtomemseg_nolock(pfn_t pfnum);
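
/*
 * A minimal sketch of a pfn-to-memseg lookup over the plain linked list
 * (illustrative only; the hashed lookup behind MEMSEG_PFN_HASH and
 * page_numtomemseg_nolock() is the fast path actually used):
 *
 *	struct memseg *seg;
 *
 *	for (seg = memsegs; seg != NULL; seg = seg->next) {
 *		if (pfn >= seg->pages_base && pfn < seg->pages_end) {
 *			pp = seg->pages + (pfn - seg->pages_base);
 *			break;
 *		}
 *	}
 */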

/*
 * page capture related info:
 * The page capture routines allow us to asynchronously capture given pages
 * for the explicit use of the requestor.  New requestors can be added by
 * explicitly adding themselves to the PC_* flags below and incrementing
 * PC_NUM_CALLBACKS as necessary.
 *
 * Subsystems using page capture must register a callback before attempting
 * to capture a page.  A duration of -1 will indicate that we will never give
 * up while trying to capture a page and will only stop trying to capture the
 * given page once we have successfully captured it.  Thus the user needs to be
 * aware of the behavior of all callers who have a duration of -1.
 *
 * For now, only /dev/physmem and page retire use the page capture interface
 * and only a single request can be outstanding for a given page.  Thus, if
 * /dev/physmem wants a page and page retire also wants the same page, only
 * the page retire request will be honored until the point in time that the
 * page is actually retired, at which point in time, subsequent requests by
 * /dev/physmem will succeed if the CAPTURE_GET_RETIRED flag was set.
 * A registration/capture sketch follows the declarations below.
 */

#define	PC_RETIRE		(0)
#define	PC_PHYSMEM		(1)
#define	PC_NUM_CALLBACKS	(2)
#define	PC_MASK			((1 << PC_NUM_CALLBACKS) - 1)

#define	CAPTURE_RETIRE		(1 << PC_RETIRE)
#define	CAPTURE_PHYSMEM		(1 << PC_PHYSMEM)

#define	CAPTURE_ASYNC		(0x0200)

#define	CAPTURE_GET_RETIRED	(0x1000)
#define	CAPTURE_GET_CAGE	(0x2000)

struct page_capture_callback {
	int cb_active;		/* 1 means active, 0 means inactive */
	clock_t duration;	/* the length in time that we'll attempt to */
				/* capture this page asynchronously. (in HZ) */
	krwlock_t cb_rwlock;
	int (*cb_func)(page_t *, void *, uint_t); /* callback function */
};

extern kcondvar_t pc_cv;

void page_capture_register_callback(uint_t index, clock_t duration,
    int (*cb_func)(page_t *, void *, uint_t));
void page_capture_unregister_callback(uint_t index);
int page_trycapture(page_t *pp, uint_t szc, uint_t flags, void *datap);
void page_unlock_capture(page_t *pp);
int page_capture_unretire_pp(page_t *);
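
/*
 * A minimal registration/capture sketch (illustrative only; the callback
 * body and the `datap' cookie are hypothetical, and the return value of
 * page_trycapture() is ignored here).  The duration of -1 requests the
 * never-give-up behavior described above:
 *
 *	static int
 *	my_capture_cb(page_t *pp, void *datap, uint_t flags)
 *	{
 *		(consume the captured page here)
 *		return (0);
 *	}
 *
 *	page_capture_register_callback(PC_PHYSMEM, -1, my_capture_cb);
 *	(void) page_trycapture(pp, 0, CAPTURE_PHYSMEM, datap);
 */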

extern int memsegs_trylock(int);
extern void memsegs_lock(int);
extern void memsegs_unlock(int);
extern int memsegs_lock_held(void);
extern void memlist_read_lock(void);
extern void memlist_read_unlock(void);
extern void memlist_write_lock(void);
extern void memlist_write_unlock(void);

#ifdef	__cplusplus
}
#endif

#endif	/* _VM_PAGE_H */