--- old/usr/src/uts/common/vm/hat.h
+++ new/usr/src/uts/common/vm/hat.h
1 1 /*
2 2 * CDDL HEADER START
3 3 *
4 4 * The contents of this file are subject to the terms of the
5 5 * Common Development and Distribution License (the "License").
6 6 * You may not use this file except in compliance with the License.
7 7 *
8 8 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
9 9 * or http://www.opensolaris.org/os/licensing.
10 10 * See the License for the specific language governing permissions
11 11 * and limitations under the License.
12 12 *
13 13 * When distributing Covered Code, include this CDDL HEADER in each
14 14 * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
15 15 * If applicable, add the following below this CDDL HEADER, with the
16 16 * fields enclosed by brackets "[]" replaced with your own identifying
17 17 * information: Portions Copyright [yyyy] [name of copyright owner]
18 18 *
19 19 * CDDL HEADER END
20 20 */
21 21 /*
22 22 * Copyright 2009 Sun Microsystems, Inc. All rights reserved.
23 23 * Use is subject to license terms.
24 24 * Copyright 2014 Joyent, Inc. All rights reserved.
25 25 */
26 26
27 27 /* Copyright (c) 1984, 1986, 1987, 1988, 1989 AT&T */
28 28 /* All Rights Reserved */
29 29
30 30 /*
31 31 * University Copyright- Copyright (c) 1982, 1986, 1988
32 32 * The Regents of the University of California
33 33 * All Rights Reserved
34 34 *
35 35 * University Acknowledgment- Portions of this document are derived from
36 36 * software developed by the University of California, Berkeley, and its
37 37 * contributors.
38 38 */
39 39
40 40 #ifndef _VM_HAT_H
41 41 #define _VM_HAT_H
42 42
43 43 #include <sys/types.h>
44 44 #include <sys/t_lock.h>
45 45 #include <vm/faultcode.h>
46 46 #include <sys/kstat.h>
47 47 #include <sys/siginfo.h>
48 48
49 49 #ifdef __cplusplus
50 50 extern "C" {
51 51 #endif
52 52
53 53 /*
54 54 * VM - Hardware Address Translation management.
55 55 *
56 56 * This file describes the machine independent interfaces to
57 57 * the hardware address translation management routines. Other
58 58 * machine specific interfaces and structures are defined
59 59 * in <vm/hat_xxx.h>. The hat layer manages the address
60 60 * translation hardware as a cache driven by calls from the
61 61 * higher levels of the VM system.
62 62 */
63 63
64 64 struct hat;
65 65 struct kpme;
66 66 struct memseg;
67 67
68 68 #include <vm/page.h>
69 69
70 70 /*
71 71 * a callback used with hat_unload_callback()
72 72 * start and end mark are set to a range of unloaded addresses
73 73 * and the function is invoked with a pointer to this data structure
74 74 */
75 75 typedef struct hat_callback {
76 76 caddr_t hcb_start_addr;
77 77 caddr_t hcb_end_addr;
78 78 void (*hcb_function)(struct hat_callback *);
79 79 void *hcb_data;
80 80 } hat_callback_t;
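/*
 * Example (an illustrative sketch, not part of the original header): a
 * caller that wants to observe each range actually unloaded can hand a
 * hat_callback_t to hat_unload_callback(), declared further below.  The
 * hat layer fills in hcb_start_addr/hcb_end_addr before invoking
 * hcb_function; the names my_unload_cb(), seg_note_unmapped() and "sdp"
 * are hypothetical.
 *
 *	static void
 *	my_unload_cb(struct hat_callback *hcb)
 *	{
 *		seg_note_unmapped(hcb->hcb_data, hcb->hcb_start_addr,
 *		    (size_t)(hcb->hcb_end_addr - hcb->hcb_start_addr));
 *	}
 *
 *	hat_callback_t cb;
 *
 *	cb.hcb_function = my_unload_cb;
 *	cb.hcb_data = sdp;
 *	hat_unload_callback(seg->s_as->a_hat, addr, len, HAT_UNLOAD_UNMAP, &cb);
 */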
81 81
82 82 typedef void *hat_region_cookie_t;
83 83
84 84 #ifdef _KERNEL
85 85
86 86 /*
87 87 * One time hat initialization
88 88 */
89 89 void hat_init(void);
90 90
91 91 /*
92 92 * Notify hat of a system dump
93 93 */
94 94 void hat_dump(void);
95 95
96 96 /*
97 97 * Operations on an address space:
98 98 *
99 99 * struct hat *hat_alloc(as)
100 100 * allocates a hat structure for as.
101 101 *
102 102 * void hat_free_start(hat)
103 103 * informs hat layer process has finished executing but as has not
104 104 * been cleaned up yet.
105 105 *
106 106 * void hat_free_end(hat)
107 107 * informs hat layer as is being destroyed. hat layer cannot use as
108 108 * pointer after this call.
109 109 *
110 110 * void hat_swapin(hat)
111 111 * allocate any hat resources required for process being swapped in.
112 112 *
113 113 * void hat_swapout(hat)
114 114 * deallocate hat resources for process being swapped out.
115 115 *
116 116 * size_t hat_get_mapped_size(hat)
117 117 * returns number of bytes that have valid mappings in hat.
118 118 *
119 119 * int hat_stats_enable(hat)
120 120 * void hat_stats_disable(hat)
121 121 * enables/disables collection of stats for hat.
122 122 *
123 123 * int hat_dup(parenthat, childhat, addr, len, flags)
124 124 * Duplicate address translations of the parent to the child. Supports
125 125 * the entire address range or a sub-range, depending on flags.
126 126 * Zero is returned on success, non-zero on error.
127 127 *
128 128 * void hat_thread_exit(thread)
129 129 * Notifies the HAT that a thread is exiting, called after it has been
130 130 * reassigned to the kernel AS.
131 131 */
132 132
133 133 struct hat *hat_alloc(struct as *);
134 134 void hat_free_start(struct hat *);
135 135 void hat_free_end(struct hat *);
136 136 int hat_dup(struct hat *, struct hat *, caddr_t, size_t, uint_t);
137 137 void hat_swapin(struct hat *);
138 138 void hat_swapout(struct hat *);
139 139 size_t hat_get_mapped_size(struct hat *);
140 140 int hat_stats_enable(struct hat *);
141 141 void hat_stats_disable(struct hat *);
142 142 void hat_thread_exit(kthread_t *);
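/*
 * Example (an illustrative ordering sketch, not part of the original
 * header): the two-step teardown described above.  hat_free_start() is
 * called once the process can no longer run; hat_free_end() only after the
 * address space itself has been torn down, since the hat layer must not
 * reference the as pointer afterwards.
 *
 *	hat_free_start(as->a_hat);
 *	... unmap and free the segments of "as" ...
 *	hat_free_end(as->a_hat);
 */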
143 143
144 144 /*
145 145 * Operations on a named address within a segment:
146 146 *
147 147 * void hat_memload(hat, addr, pp, attr, flags)
148 148 * load/lock the given page struct
149 149 *
150 150 * void hat_memload_array(hat, addr, len, ppa, attr, flags)
151 151 * load/lock the given array of page structs
152 152 *
153 153 * void hat_devload(hat, addr, len, pf, attr, flags)
154 154 * load/lock the given page frame number
155 155 *
156 156 * void hat_unlock(hat, addr, len)
157 157 * unlock a given range of addresses
158 158 *
159 159 * void hat_unload(hat, addr, len, flags)
160 160 * void hat_unload_callback(hat, addr, len, flags, callback)
161 161 * unload a given range of addresses (has optional callback)
162 162 *
163 163 * void hat_sync(hat, addr, len, flags)
164 164 * synchronize mapping with software data structures
165 165 *
166 166 * void hat_map(hat, addr, len, flags)
167 167 *
168 168 * void hat_setattr(hat, addr, len, attr)
169 169 * void hat_clrattr(hat, addr, len, attr)
170 170 * void hat_chgattr(hat, addr, len, attr)
171 171 * modify attributes for a range of addresses. skips any invalid mappings
172 172 *
173 173 * uint_t hat_getattr(hat, addr, *attr)
174 174 * returns attr for <hat,addr> in *attr. returns 0 if there was a
175 175 * mapping and *attr is valid, nonzero if there was no mapping and
176 176 * *attr is not valid.
177 177 *
178 178 * size_t hat_getpagesize(hat, addr)
179 179 * returns pagesize in bytes for <hat, addr>. returns -1 if there is
180 180 * no mapping. This is an advisory call.
181 181 *
182 182 * pfn_t hat_getpfnum(hat, addr)
183 183 * returns pfn for <hat, addr> or PFN_INVALID if mapping is invalid.
184 184 *
185 185 * int hat_probe(hat, addr)
186 186 * returns 0 if no valid mapping is present. Faster version
187 187 * of hat_getattr in certain architectures.
188 188 *
189 189 * int hat_share(dhat, daddr, shat, saddr, len, szc)
190 190 *
191 191 * void hat_unshare(hat, addr, len, szc)
192 192 *
193 193 * void hat_chgprot(hat, addr, len, vprot)
194 194 * This is a deprecated call. New segment drivers should store
195 195 * all attributes and use hat_*attr calls.
196 196 * Change the protections in the virtual address range
197 197 * given to the specified virtual protection. If vprot is ~PROT_WRITE,
198 198 * then remove write permission, leaving the other permissions
199 199 * unchanged. If vprot is ~PROT_USER, remove user permissions.
200 200 *
201 201 * void hat_flush_range(hat, addr, size)
202 202 * Invalidate a virtual address translation for the local CPU.
203 203 */
204 204
205 205 void hat_memload(struct hat *, caddr_t, struct page *, uint_t, uint_t);
206 206 void hat_memload_array(struct hat *, caddr_t, size_t, struct page **,
207 207 uint_t, uint_t);
208 208 void hat_memload_region(struct hat *, caddr_t, struct page *, uint_t,
209 209 uint_t, hat_region_cookie_t);
210 210 void hat_memload_array_region(struct hat *, caddr_t, size_t, struct page **,
211 211 uint_t, uint_t, hat_region_cookie_t);
212 212
213 213 void hat_devload(struct hat *, caddr_t, size_t, pfn_t, uint_t, int);
214 214
215 215 void hat_unlock(struct hat *, caddr_t, size_t);
216 216 void hat_unlock_region(struct hat *, caddr_t, size_t, hat_region_cookie_t);
217 217
218 218 void hat_unload(struct hat *, caddr_t, size_t, uint_t);
219 219 void hat_unload_callback(struct hat *, caddr_t, size_t, uint_t,
220 220 hat_callback_t *);
221 221 void hat_flush_range(struct hat *, caddr_t, size_t);
222 222 void hat_sync(struct hat *, caddr_t, size_t, uint_t);
223 223 void hat_map(struct hat *, caddr_t, size_t, uint_t);
224 224 void hat_setattr(struct hat *, caddr_t, size_t, uint_t);
225 225 void hat_clrattr(struct hat *, caddr_t, size_t, uint_t);
226 226 void hat_chgattr(struct hat *, caddr_t, size_t, uint_t);
227 227 uint_t hat_getattr(struct hat *, caddr_t, uint_t *);
228 228 ssize_t hat_getpagesize(struct hat *, caddr_t);
229 229 pfn_t hat_getpfnum(struct hat *, caddr_t);
230 230 int hat_probe(struct hat *, caddr_t);
231 231 int hat_share(struct hat *, caddr_t, struct hat *, caddr_t, size_t, uint_t);
232 232 void hat_unshare(struct hat *, caddr_t, size_t, uint_t);
233 233 void hat_chgprot(struct hat *, caddr_t, size_t, uint_t);
234 234 void hat_reserve(struct as *, caddr_t, size_t);
235 235 pfn_t va_to_pfn(void *);
236 236 uint64_t va_to_pa(void *);
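/*
 * Example (an illustrative sketch, not part of the original header):
 * establish a locked translation to a single page, query the backing page
 * frame, and later drop the mapping again.  "addr" and "pp" are
 * hypothetical; the PROT_* bits come from <sys/mman.h>, and the HAT_*
 * flag and attribute values are defined further below.
 *
 *	hat_memload(as->a_hat, addr, pp, PROT_READ | PROT_WRITE,
 *	    HAT_LOAD_LOCK);
 *	pfn = hat_getpfnum(as->a_hat, addr);
 *	if (pfn == PFN_INVALID)
 *		... no valid mapping at addr ...
 *	hat_unload(as->a_hat, addr, MMU_PAGESIZE, HAT_UNLOAD_UNLOCK);
 */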
237 237
238 238 /*
239 239 * Kernel Physical Mapping (segkpm) hat interface routines.
240 240 */
241 241 caddr_t hat_kpm_mapin(struct page *, struct kpme *);
242 242 void hat_kpm_mapout(struct page *, struct kpme *, caddr_t);
243 243 caddr_t hat_kpm_mapin_pfn(pfn_t);
244 244 void hat_kpm_mapout_pfn(pfn_t);
245 245 caddr_t hat_kpm_page2va(struct page *, int);
246 246 struct page *hat_kpm_vaddr2page(caddr_t);
247 247 int hat_kpm_fault(struct hat *, caddr_t);
248 248 void hat_kpm_mseghash_clear(int);
249 249 void hat_kpm_mseghash_update(pgcnt_t, struct memseg *);
250 250 void hat_kpm_addmem_mseg_update(struct memseg *, pgcnt_t, offset_t);
251 251 void hat_kpm_addmem_mseg_insert(struct memseg *);
252 252 void hat_kpm_addmem_memsegs_update(struct memseg *);
253 253 caddr_t hat_kpm_mseg_reuse(struct memseg *);
254 254 void hat_kpm_delmem_mseg_update(struct memseg *, struct memseg **);
255 255 void hat_kpm_split_mseg_update(struct memseg *, struct memseg **,
256 256 struct memseg *, struct memseg *, struct memseg *);
257 257 void hat_kpm_walk(void (*)(void *, void *, size_t), void *);
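/*
 * Example (an illustrative sketch, not part of the original header): copy
 * a page's contents out through its kernel physical mapping.  Passing a
 * NULL kpme is assumed to be acceptable for callers that do not cache the
 * mapping; "buf" is hypothetical.
 *
 *	caddr_t kva;
 *
 *	kva = hat_kpm_mapin(pp, NULL);
 *	bcopy(kva, buf, MMU_PAGESIZE);
 *	hat_kpm_mapout(pp, NULL, kva);
 */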
258 258
259 259 /*
260 260 * Operations on all translations for a given page(s)
261 261 *
262 262 * void hat_page_setattr(pp, flag)
263 263 * void hat_page_clrattr(pp, flag)
264 264 * used to set/clr ref/mod bits.
265 265 *
266 266 * uint hat_page_getattr(pp, flag)
267 267 * If flag is specified, returns 0 if attribute is disabled
268 268 * and nonzero if enabled. If flag specifies multiple attributes
269 269 * then returns 0 if ALL attributes are disabled. This is an advisory
270 270 * call.
271 271 *
272 272 * int hat_pageunload(pp, forceflag)
273 273 * Unload all translations attached to pp. On x86 the bulk of the work is
274 274 * done by hat_page_inval.
275 275 *
276 276 * void hat_page_inval(pp, pgsz, curhat)
277 277 * Unload translations attached to pp. If curhat is provided, only the
278 278 * translation for that process is unloaded, otherwise all are unloaded.
279 279 *
280 280 * uint_t hat_pagesync(pp, flags)
281 281 * get hw stats from hardware into page struct and reset hw stats
282 282 * returns attributes of page
283 283 *
284 284 * ulong_t hat_page_getshare(pp)
285 285 * returns approx number of mappings to this pp. A return of 0 implies
286 286 * there are no mappings to the page.
287 287 *
288 288 * faultcode_t hat_softlock(hat, addr, lenp, ppp, flags);
289 289 * called to softlock pages for zero copy tcp
290 290 *
291 291 * void hat_page_demote(pp);
292 292 * unload all large mappings to pp and decrease p_szc of all
293 293 * constituent pages according to the remaining mappings.
294 294 */
295 295
296 296 void hat_page_setattr(struct page *, uint_t);
297 297 void hat_page_clrattr(struct page *, uint_t);
298 298 uint_t hat_page_getattr(struct page *, uint_t);
299 299 int hat_pageunload(struct page *, uint_t);
300 300 void hat_page_inval(struct page *, uint_t, struct hat *);
301 301 uint_t hat_pagesync(struct page *, uint_t);
302 302 ulong_t hat_page_getshare(struct page *);
303 303 int hat_page_checkshare(struct page *, ulong_t);
304 304 faultcode_t hat_softlock(struct hat *, caddr_t, size_t *,
305 305 struct page **, uint_t);
306 306 void hat_page_demote(struct page *);
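/*
 * Example (an illustrative sketch, not part of the original header): a
 * pageout-style check that pulls the hardware ref/mod state into the page
 * and stops as soon as a modified translation is seen.  The HAT_SYNC_* and
 * P_* values are defined further below.
 *
 *	uint_t rm;
 *
 *	rm = hat_pagesync(pp, HAT_SYNC_DONTZERO | HAT_SYNC_STOPON_MOD);
 *	if (rm & P_MOD)
 *		... page is dirty and must be written back ...
 */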
307 307
308 308 /*
309 309 * Routine to expose supported HAT features to PIM.
310 310 */
311 311 enum hat_features {
312 312 HAT_SHARED_PT, /* Shared page tables */
313 313 HAT_DYNAMIC_ISM_UNMAP, /* hat_pageunload() handles ISM pages */
314 314 HAT_VMODSORT, /* support for VMODSORT flag of vnode */
315 315 HAT_SHARED_REGIONS /* shared regions support */
316 316 };
317 317
318 318 int hat_supported(enum hat_features, void *);
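/*
 * Example (an illustrative sketch, not part of the original header):
 * feature tests are simple boolean queries; the second argument is
 * feature-specific and NULL is assumed to be sufficient for these two.
 *
 *	if (hat_supported(HAT_VMODSORT, NULL))
 *		... vnode v_pages lists are kept sorted by modified state ...
 *	if (hat_supported(HAT_DYNAMIC_ISM_UNMAP, NULL))
 *		... hat_pageunload() may be applied to ISM-mapped pages ...
 */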
319 319
320 320 /*
321 321 * Services provided to the hat:
322 322 *
323 323 * void as_signal_proc(as, siginfo)
324 324 * deliver signal to all processes that have this as.
325 325 *
326 326 * void hat_setstat(as, addr, len, rmbits)
327 327 * informs hatstat layer that ref/mod bits need to be updated for
328 328 * the address range.
329 329 */
330 330 void as_signal_proc(struct as *, k_siginfo_t *siginfo);
331 331 void hat_setstat(struct as *, caddr_t, size_t, uint_t);
332 332
333 333 /*
334 334 * Flags to pass to hat routines.
335 335 *
336 336 * Certain flags only apply to some interfaces:
337 337 *
338 338 * HAT_LOAD Default flags to load a translation to the page.
339 339 * HAT_LOAD_LOCK Lock down mapping resources; hat_map(), hat_memload(),
340 340 * and hat_devload().
341 341 * HAT_LOAD_ADV Advisory load - Load translation if and only if
342 342 * sufficient MMU resources exist (i.e., do not steal).
343 343 * HAT_LOAD_SHARE A flag to hat_memload() to indicate h/w page tables
344 344 * that map some user pages (not kas) are shared by more
345 345 * than one process (e.g. ISM).
346 346 * HAT_LOAD_CONTIG Pages are contiguous.
347 347 * HAT_LOAD_NOCONSIST Do not add mapping to mapping list.
348 348 * HAT_LOAD_REMAP Reload a valid pte with a different page frame.
349 349 * HAT_RELOAD_SHARE Reload a shared page table entry. Some platforms
350 350 * may require different actions than on the first
351 351 * load of a shared mapping.
352 352 * HAT_NO_KALLOC Do not kmem_alloc while creating the mapping; at this
353 353 * point, it's setting up mapping to allocate internal
354 354 * hat layer data structures. This flag forces hat layer
355 355 * to tap its reserves in order to prevent infinite
356 356 * recursion.
357 357 * HAT_LOAD_TEXT A flag to hat_memload() to indicate loading text pages.
358 358 */
359 359
360 360 /*
361 361 * Flags for hat_memload/hat_devload
362 362 */
363 363 #define HAT_FLAGS_RESV 0xFF000000 /* resv for hat impl */
364 364 #define HAT_LOAD 0x00
365 365 #define HAT_LOAD_LOCK 0x01
366 366 #define HAT_LOAD_ADV 0x04
367 367 #define HAT_LOAD_CONTIG 0x10
368 368 #define HAT_LOAD_NOCONSIST 0x20
369 369 #define HAT_LOAD_SHARE 0x40
370 370 #define HAT_LOAD_REMAP 0x80
371 371 #define HAT_RELOAD_SHARE 0x100
372 372 #define HAT_NO_KALLOC 0x200
373 373 #define HAT_LOAD_TEXT 0x400
374 374
375 375 /*
376 376 * Flags for initializing disable_*large_pages.
377 377 *
378 378 * HAT_AUTO_TEXT Get MMU specific disable_auto_text_large_pages
379 379 * HAT_AUTO_DATA Get MMU specific disable_auto_data_large_pages
380 380 */
381 381 #define HAT_AUTO_TEXT 0x800
382 382 #define HAT_AUTO_DATA 0x1000
383 383
384 384 /*
385 385 * Attributes for hat_memload/hat_devload/hat_*attr
386 386 * are a superset of prot flags defined in mman.h.
387 387 */
388 388 #define HAT_PLAT_ATTR_MASK 0xF00000
389 389 #define HAT_PROT_MASK 0x0F
390 390
391 391 #define HAT_NOFAULT 0x10
392 392 #define HAT_NOSYNC 0x20
393 393
394 394 /*
395 395 * Advisory ordering attributes. Apply only to device mappings.
396 396 *
397 397 * HAT_STRICTORDER: the CPU must issue the references in order, as the
398 398 * programmer specified. This is the default.
399 399 * HAT_UNORDERED_OK: the CPU may reorder the references (this is all kinds
400 400 * of reordering; store or load with store or load).
401 401 * HAT_MERGING_OK: merging and batching: the CPU may merge individual stores
402 402 * to consecutive locations (for example, turn two consecutive byte
403 403 * stores into one halfword store), and it may batch individual loads
404 404 * (for example, turn two consecutive byte loads into one halfword load).
405 405 * This also implies re-ordering.
406 406 * HAT_LOADCACHING_OK: the CPU may cache the data it fetches and reuse it
407 407 * until another store occurs. The default is to fetch new data
408 408 * on every load. This also implies merging.
409 409 * HAT_STORECACHING_OK: the CPU may keep the data in the cache and push it to
410 410 * the device (perhaps with other data) at a later time. The default is
411 411 * to push the data right away. This also implies load caching.
412 412 */
413 413 #define HAT_STRICTORDER 0x0000
414 414 #define HAT_UNORDERED_OK 0x0100
415 415 #define HAT_MERGING_OK 0x0200
416 416 #define HAT_LOADCACHING_OK 0x0300
417 417 #define HAT_STORECACHING_OK 0x0400
418 418 #define HAT_ORDER_MASK 0x0700
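/*
 * Example (an illustrative sketch, not part of the original header): a
 * strictly ordered mapping is the default and suits device control
 * registers, while something like a frame buffer might tolerate write
 * merging for throughput.  The variable names are hypothetical; kas is the
 * kernel's address space.
 *
 *	hat_devload(kas.a_hat, regs_va, ptob(1), regs_pfn,
 *	    PROT_READ | PROT_WRITE | HAT_NOSYNC | HAT_STRICTORDER,
 *	    HAT_LOAD_LOCK);
 *	hat_devload(kas.a_hat, fb_va, fb_len, fb_pfn,
 *	    PROT_READ | PROT_WRITE | HAT_NOSYNC | HAT_MERGING_OK,
 *	    HAT_LOAD_LOCK);
 */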
419 419
420 420 /* endian attributes */
421 421 #define HAT_NEVERSWAP 0x0000
422 422 #define HAT_STRUCTURE_BE 0x1000
423 423 #define HAT_STRUCTURE_LE 0x2000
424 424 #define HAT_ENDIAN_MASK 0x3000
425 425
426 426 /* flags for hat_softlock */
427 427 #define HAT_COW 0x0001
428 428
429 429 /*
430 430 * Flags for hat_unload
431 431 */
432 432 #define HAT_UNLOAD 0x00
433 433 #define HAT_UNLOAD_NOSYNC 0x02
434 434 #define HAT_UNLOAD_UNLOCK 0x04
435 435 #define HAT_UNLOAD_OTHER 0x08
436 436 #define HAT_UNLOAD_UNMAP 0x10
437 437
438 438 /*
439 439 * Flags for hat_pagesync, hat_getstat, hat_sync
440 440 */
441 441 #define HAT_SYNC_DONTZERO 0x00
442 442 #define HAT_SYNC_ZERORM 0x01
443 443 /* Additional flags for hat_pagesync */
444 444 #define HAT_SYNC_STOPON_REF 0x02
445 445 #define HAT_SYNC_STOPON_MOD 0x04
446 446 #define HAT_SYNC_STOPON_RM (HAT_SYNC_STOPON_REF | HAT_SYNC_STOPON_MOD)
447 447 #define HAT_SYNC_STOPON_SHARED 0x08
448 448
449 449 /*
450 450 * Flags for hat_dup
451 451 *
452 452 * HAT_DUP_ALL dup entire address space
453 453 * HAT_DUP_COW dup plus hat_clrattr(..PROT_WRITE) on newas
454 454 */
455 455 #define HAT_DUP_ALL 1
456 456 #define HAT_DUP_COW 2
457 457 #define HAT_DUP_SRD 3
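/*
 * Example (an illustrative sketch, not part of the original header): an
 * address-space duplication path might copy every parent translation into
 * the child as shown, assuming a NULL address and zero length denote the
 * entire range when HAT_DUP_ALL is used; "newas" is hypothetical.
 *
 *	error = hat_dup(as->a_hat, newas->a_hat, NULL, 0, HAT_DUP_ALL);
 */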
458 458
459 459
460 460 /*
461 461 * Flags for hat_map
462 462 */
463 463 #define HAT_MAP 0x00
464 464
465 465 /*
466 466 * Flag for hat_pageunload
467 467 */
468 468 #define HAT_ADV_PGUNLOAD 0x00
469 469 #define HAT_FORCE_PGUNLOAD 0x01
470 470 #define HAT_CURPROC_PGUNLOAD 0x02
471 471
472 472 /*
473 473 * Attributes for hat_page_*attr, hat_setstats and
474 474 * returned by hat_pagesync.
475 475 */
476 476 #define P_MOD 0x1 /* the modified bit */
477 477 #define P_REF 0x2 /* the referenced bit */
478 478 #define P_RO 0x4 /* Read only page */
479 479 #define P_NSH 0x8 /* Not to shuffle v_pages */
480 480
481 481 #define hat_ismod(pp) (hat_page_getattr(pp, P_MOD))
482 482 #define hat_isref(pp) (hat_page_getattr(pp, P_REF))
483 483 #define hat_isro(pp) (hat_page_getattr(pp, P_RO))
484 484
485 485 #define hat_setmod(pp) (hat_page_setattr(pp, P_MOD))
486 486 #define hat_setmod_only(pp) (hat_page_setattr(pp, P_MOD|P_NSH))
487 487 #define hat_setref(pp) (hat_page_setattr(pp, P_REF))
488 488 #define hat_setrefmod(pp) (hat_page_setattr(pp, P_REF|P_MOD))
489 489
490 490 #define hat_clrmod(pp) (hat_page_clrattr(pp, P_MOD))
491 491 #define hat_clrref(pp) (hat_page_clrattr(pp, P_REF))
492 492 #define hat_clrrefmod(pp) (hat_page_clrattr(pp, P_REF|P_MOD))
493 493
494 494 #define hat_page_is_mapped(pp) (hat_page_getshare(pp))
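/*
 * Example (an illustrative sketch, not part of the original header): the
 * convenience macros above read naturally in write-back paths.
 *
 *	if (hat_ismod(pp)) {
 *		hat_clrrefmod(pp);
 *		... schedule pp for write-back ...
 *	}
 */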
495 495
496 496 /*
497 497 * hat_setup is being used in sparc/os/sundep.c
498 498 */
499 499 void hat_setup(struct hat *, int);
500 500
501 501 /*
502 502 * Flags for hat_setup
503 503 */
504 504 #define HAT_DONTALLOC 0
505 505 #define HAT_ALLOC 1
506 506 #define HAT_INIT 2
507 507
508 508 /*
509 509 * Other routines, for statistics
510 510 */
511 511 int hat_startstat(struct as *);
512 512 void hat_getstat(struct as *, caddr_t, size_t, uint_t, char *, int);
513 513 void hat_freestat(struct as *, int);
514 514 void hat_resvstat(size_t, struct as *, caddr_t);
515 515
516 516 /*
517 517 * Relocation callback routines. Currently only sfmmu HAT supports
518 518 * these.
519 519 */
520 520 extern int hat_add_callback(id_t, caddr_t, uint_t, uint_t, void *,
521 521 pfn_t *, void **);
522 522 extern id_t hat_register_callback(int,
523 523 int (*prehandler)(caddr_t, uint_t, uint_t, void *),
524 524 int (*posthandler)(caddr_t, uint_t, uint_t, void *, pfn_t),
525 525 int (*errhandler)(caddr_t, uint_t, uint_t, void *), int);
526 526 extern void hat_delete_callback(caddr_t, uint_t, void *, uint_t, void *);
527 527
528 528 /*
529 529 * hat_add_callback()/hat_delete_callback() flags.
530 530 */
531 531 #define HAC_NOSLEEP 0x0
532 532 #define HAC_SLEEP 0x1
533 533 #define HAC_PAGELOCK 0x2
534 534
535 535 /*
536 536 * Suspend/unsuspend handler callback arguments.
537 537 */
538 538 #define HAT_SUSPEND 0x0010
539 539 #define HAT_UNSUSPEND 0x0010
540 540 #define HAT_PRESUSPEND 0x0020
541 541 #define HAT_POSTUNSUSPEND 0x0020
542 542
543 543 /*
544 544 * Error handler callback arguments. See the block comments
545 545 * before the implementation of hat_add_callback() for an
546 546 * explanation of what these mean.
547 547 */
548 548 #define HAT_CB_ERR_LEAKED 0x1
549 549
550 550 #endif /* _KERNEL */
551 551
552 552 /*
553 553 * The size of the bit array for ref and mod bit storage must be a power of 2.
554 554 * 2 bits are collected for each page. Below the power used is 4,
555 555 * which is 16 8-bit characters = 128 bits, ref and mod bit information
556 556 * for 64 pages.
557 557 */
558 558 #define HRM_SHIFT 4
559 559 #define HRM_BYTES (1 << HRM_SHIFT)
560 560 #define HRM_PAGES ((HRM_BYTES * NBBY) / 2)
561 561 #define HRM_PGPERBYTE (NBBY/2)
562 562 #define HRM_PGBYTEMASK (HRM_PGPERBYTE-1)
563 563
564 564 #define HRM_PGOFFMASK ((HRM_PGPERBYTE-1) << MMU_PAGESHIFT)
565 565 #define HRM_BASEOFFSET (((MMU_PAGESIZE * HRM_PAGES) - 1))
566 566 #define HRM_BASEMASK (~(HRM_BASEOFFSET))
567 567
568 568 #define HRM_BASESHIFT (MMU_PAGESHIFT + (HRM_SHIFT + 2))
569 569 #define HRM_PAGEMASK (MMU_PAGEMASK ^ HRM_BASEMASK)
570 570
571 571 #define HRM_HASHSIZE 0x200
572 572 #define HRM_HASHMASK (HRM_HASHSIZE - 1)
573 573
574 574 #define HRM_BLIST_INCR 0x200
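/*
 * Worked out for the values above (a clarifying note, not original text):
 * HRM_BYTES = 1 << 4 = 16 bytes per block and HRM_PAGES = (16 * NBBY) / 2 =
 * 64 pages per block, i.e. HRM_PGPERBYTE = 4 pages per byte at 2 bits each.
 * HRM_BASESHIFT adds 2 to HRM_SHIFT because each byte covers 2^2 pages, so
 * one block spans 2^(HRM_SHIFT + 2) = 64 pages of virtual address space.
 */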
575 575
576 576 /*
577 577 * The structure for maintaining referenced and modified information
578 578 */
579 579 struct hrmstat {
580 580 struct as *hrm_as; /* stat block belongs to this as */
581 581 uintptr_t hrm_base; /* base of block */
582 582 ushort_t hrm_id; /* opaque identifier, one of a_vbits */
583 583 struct hrmstat *hrm_anext; /* as statistics block list */
584 584 struct hrmstat *hrm_hnext; /* list for hashed blocks */
585 585 uchar_t hrm_bits[HRM_BYTES]; /* the ref and mod bits */
586 586 };
587 587
588 588 extern struct hrmstat **hrm_hashtab;
589 589
590 590 /*
591 591 * For global monitoring of the reference and modified bits
592 592 * of all address spaces we reserve one id bit.
593 593 */
594 594 #define HRM_SWSMONID 1
595 595
596 596
597 597 #ifdef _KERNEL
598 598
599 599 /*
600 600 * Hat locking functions
601 601 * XXX - these two functions are currently being used by hatstats;
602 602 * they can be removed by using a per-as mutex for hatstats.
603 603 */
604 604 void hat_enter(struct hat *);
605 605 void hat_exit(struct hat *);
606 606
607 607 typedef void (*hat_rgn_cb_func_t)(caddr_t, caddr_t, caddr_t,
608 608 size_t, void *, u_offset_t);
609 609
610 610 void hat_join_srd(struct hat *, vnode_t *);
611 611
612 612 hat_region_cookie_t hat_join_region(struct hat *, caddr_t, size_t, void *,
613 613 u_offset_t, uchar_t, uchar_t, hat_rgn_cb_func_t,
614 614 uint_t);
615 615 void hat_leave_region(struct hat *, hat_region_cookie_t,
616 616 uint_t);
617 617 void hat_dup_region(struct hat *, hat_region_cookie_t);
618 618
619 619 #define HAT_INVALID_REGION_COOKIE ((hat_region_cookie_t)-1)
620 620 #define HAT_IS_REGION_COOKIE_VALID(c) ((c) != HAT_INVALID_REGION_COOKIE)
621 621
622 622 /* hat_join_region() flags */
623 623
624 624 #define HAT_REGION_TEXT 0x1 /* passed by segvn */
625 625 #define HAT_REGION_ISM 0x2 /* for hat_share()/hat_unshare() */
626 626
627 627 #define HAT_REGION_TYPE_MASK (0x7)
628 628
629 629 #endif /* _KERNEL */
630 630
631 631 #ifdef __cplusplus
632 632 }
633 633 #endif
634 634
635 635 #endif /* _VM_HAT_H */