13097 improve VM tunables for modern systems (fix mismerge)
--- old/usr/src/uts/common/vm/vm_page.c
+++ new/usr/src/uts/common/vm/vm_page.c
1 1 /*
2 2 * CDDL HEADER START
3 3 *
4 4 * The contents of this file are subject to the terms of the
5 5 * Common Development and Distribution License (the "License").
6 6 * You may not use this file except in compliance with the License.
7 7 *
8 8 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
9 9 * or http://www.opensolaris.org/os/licensing.
10 10 * See the License for the specific language governing permissions
11 11 * and limitations under the License.
12 12 *
13 13 * When distributing Covered Code, include this CDDL HEADER in each
14 14 * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
15 15 * If applicable, add the following below this CDDL HEADER, with the
16 16 * fields enclosed by brackets "[]" replaced with your own identifying
17 17 * information: Portions Copyright [yyyy] [name of copyright owner]
18 18 *
19 19 * CDDL HEADER END
20 20 */
21 21 /*
22 22 * Copyright (c) 1986, 2010, Oracle and/or its affiliates. All rights reserved.
23 23 * Copyright (c) 2015, Josef 'Jeff' Sipek <jeffpc@josefsipek.net>
24 24 * Copyright (c) 2015, 2016 by Delphix. All rights reserved.
25 25 * Copyright 2018 Joyent, Inc.
26 26 */
27 27
28 28 /* Copyright (c) 1983, 1984, 1985, 1986, 1987, 1988, 1989 AT&T */
29 29 /* All Rights Reserved */
30 30
31 31 /*
32 32 * University Copyright- Copyright (c) 1982, 1986, 1988
33 33 * The Regents of the University of California
34 34 * All Rights Reserved
35 35 *
36 36 * University Acknowledgment- Portions of this document are derived from
37 37 * software developed by the University of California, Berkeley, and its
38 38 * contributors.
39 39 */
40 40
41 41 /*
42 42 * VM - physical page management.
43 43 */
44 44
45 45 #include <sys/types.h>
46 46 #include <sys/t_lock.h>
47 47 #include <sys/param.h>
48 48 #include <sys/systm.h>
49 49 #include <sys/errno.h>
50 50 #include <sys/time.h>
51 51 #include <sys/vnode.h>
52 52 #include <sys/vm.h>
53 53 #include <sys/vtrace.h>
54 54 #include <sys/swap.h>
55 55 #include <sys/cmn_err.h>
56 56 #include <sys/tuneable.h>
57 57 #include <sys/sysmacros.h>
58 58 #include <sys/cpuvar.h>
59 59 #include <sys/callb.h>
60 60 #include <sys/debug.h>
61 61 #include <sys/tnf_probe.h>
62 62 #include <sys/condvar_impl.h>
63 63 #include <sys/mem_config.h>
64 64 #include <sys/mem_cage.h>
65 65 #include <sys/kmem.h>
66 66 #include <sys/atomic.h>
67 67 #include <sys/strlog.h>
68 68 #include <sys/mman.h>
69 69 #include <sys/ontrap.h>
70 70 #include <sys/lgrp.h>
71 71 #include <sys/vfs.h>
72 72
73 73 #include <vm/hat.h>
74 74 #include <vm/anon.h>
75 75 #include <vm/page.h>
76 76 #include <vm/seg.h>
77 77 #include <vm/pvn.h>
78 78 #include <vm/seg_kmem.h>
79 79 #include <vm/vm_dep.h>
80 80 #include <sys/vm_usage.h>
81 81 #include <fs/fs_subr.h>
82 82 #include <sys/ddi.h>
83 83 #include <sys/modctl.h>
84 84
85 85 static pgcnt_t max_page_get; /* max page_get request size in pages */
86 86 pgcnt_t total_pages = 0; /* total number of pages (used by /proc) */
87 +uint64_t n_throttle = 0; /* num times page create throttled */
87 88
88 89 /*
89 90 * freemem_lock protects all freemem variables:
90 91 * availrmem. Also this lock protects the globals which track the
91 92 * availrmem changes for accurate kernel footprint calculation.
92 93 * See below for an explanation of these
93 94 * globals.
94 95 */
95 96 kmutex_t freemem_lock;
96 97 pgcnt_t availrmem;
97 98 pgcnt_t availrmem_initial;
98 99
99 100 /*
100 101 * These globals track availrmem changes to get a more accurate
101 102  * estimate of the kernel size. Historically pp_kernel is used for
102 103 * kernel size and is based on availrmem. But availrmem is adjusted for
103 104 * locked pages in the system not just for kernel locked pages.
104 105 * These new counters will track the pages locked through segvn and
105 106 * by explicit user locking.
106 107 *
107 108 * pages_locked : How many pages are locked because of user specified
108 109 * locking through mlock or plock.
109 110 *
110 111 * pages_useclaim,pages_claimed : These two variables track the
111 112 * claim adjustments because of the protection changes on a segvn segment.
112 113 *
113 114 * All these globals are protected by the same lock which protects availrmem.
114 115 */
115 116 pgcnt_t pages_locked = 0;
116 117 pgcnt_t pages_useclaim = 0;
117 118 pgcnt_t pages_claimed = 0;
118 119
119 120
120 121 /*
121 122 * new_freemem_lock protects freemem, freemem_wait & freemem_cv.
122 123 */
123 124 static kmutex_t new_freemem_lock;
124 125 static uint_t freemem_wait; /* someone waiting for freemem */
125 126 static kcondvar_t freemem_cv;
126 127
127 128 /*
128 129 * The logical page free list is maintained as two lists, the 'free'
129 130 * and the 'cache' lists.
130 131 * The free list contains those pages that should be reused first.
131 132 *
132 133 * The implementation of the lists is machine dependent.
133 134 * page_get_freelist(), page_get_cachelist(),
134 135 * page_list_sub(), and page_list_add()
135 136 * form the interface to the machine dependent implementation.
136 137 *
137 138 * Pages with p_free set are on the cache list.
138 139 * Pages with p_free and p_age set are on the free list,
139 140  * Pages with p_free and p_age set are on the free list.
140 141 * A page may be locked while on either list.
141 142 */
142 143
143 144 /*
144 145 * free list accounting stuff.
145 146 *
146 147 *
147 148 * Spread out the value for the number of pages on the
148 149 * page free and page cache lists. If there is just one
149 150 * value, then it must be under just one lock.
150 151 * The lock contention and cache traffic are a real bother.
151 152 *
152 153 * When we acquire and then drop a single pcf lock
153 154 * we can start in the middle of the array of pcf structures.
154 155 * If we acquire more than one pcf lock at a time, we need to
155 156 * start at the front to avoid deadlocking.
156 157 *
157 158 * pcf_count holds the number of pages in each pool.
158 159 *
159 160 * pcf_block is set when page_create_get_something() has asked the
160 161 * PSM page freelist and page cachelist routines without specifying
161 162 * a color and nothing came back. This is used to block anything
162 163 * else from moving pages from one list to the other while the
163 164  * lists are searched again. If a page is freed while pcf_block is
164 165 * set, then pcf_reserve is incremented. pcgs_unblock() takes care
165 166  * of clearing pcf_block, doing the wakeups, etc.
166 167 */
167 168
168 169 #define MAX_PCF_FANOUT NCPU
169 170 static uint_t pcf_fanout = 1; /* Will get changed at boot time */
170 171 static uint_t pcf_fanout_mask = 0;
171 172
172 173 struct pcf {
173 174 kmutex_t pcf_lock; /* protects the structure */
174 175 uint_t pcf_count; /* page count */
175 176 uint_t pcf_wait; /* number of waiters */
176 177 uint_t pcf_block; /* pcgs flag to page_free() */
177 178 uint_t pcf_reserve; /* pages freed after pcf_block set */
178 179 uint_t pcf_fill[10]; /* to line up on the caches */
179 180 };
180 181
181 182 /*
182 183 * PCF_INDEX hash needs to be dynamic (every so often the hash changes where
183 184 * it will hash the cpu to). This is done to prevent a drain condition
184 185 * from happening. This drain condition will occur when pcf_count decrement
185 186 * occurs on cpu A and the increment of pcf_count always occurs on cpu B. An
186 187 * example of this shows up with device interrupts. The dma buffer is allocated
187 188 * by the cpu requesting the IO thus the pcf_count is decremented based on that.
188 189 * When the memory is returned by the interrupt thread, the pcf_count will be
189 190 * incremented based on the cpu servicing the interrupt.
190 191 */
191 192 static struct pcf pcf[MAX_PCF_FANOUT];
192 193 #define PCF_INDEX() ((int)(((long)CPU->cpu_seqid) + \
193 194 (randtick() >> 24)) & (pcf_fanout_mask))
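As a rough illustration of the PCF_INDEX() hashing above, the following stand-alone sketch (ordinary user-space C, not kernel code) adds a few high-order pseudo-random bits to a CPU id and masks the sum into a power-of-two number of buckets; cpu_seqid and tick are stand-ins for CPU->cpu_seqid and randtick(), not the kernel symbols.

#include <stdio.h>

#define	FANOUT	8		/* a power of two, like pcf_fanout */

static int
pcf_index(long cpu_seqid, unsigned long tick)
{
	/* mix in random high bits so one CPU doesn't always hit one bucket */
	return ((int)(cpu_seqid + (tick >> 24)) & (FANOUT - 1));
}

int
main(void)
{
	printf("cpu 3, tick 0x12345678 -> bucket %d\n",
	    pcf_index(3, 0x12345678UL));
	return (0);
}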
194 195
195 196 static int pcf_decrement_bucket(pgcnt_t);
196 197 static int pcf_decrement_multiple(pgcnt_t *, pgcnt_t, int);
197 198
198 199 kmutex_t pcgs_lock; /* serializes page_create_get_ */
199 200 kmutex_t pcgs_cagelock; /* serializes NOSLEEP cage allocs */
200 201 kmutex_t pcgs_wait_lock; /* used for delay in pcgs */
201 202 static kcondvar_t pcgs_cv; /* cv for delay in pcgs */
202 203
203 204 #ifdef VM_STATS
204 205
205 206 /*
206 207 * No locks, but so what, they are only statistics.
207 208 */
208 209
209 210 static struct page_tcnt {
210 211 int pc_free_cache; /* free's into cache list */
211 212 int pc_free_dontneed; /* free's with dontneed */
212 213 int pc_free_pageout; /* free's from pageout */
213 214 int pc_free_free; /* free's into free list */
214 215 int pc_free_pages; /* free's into large page free list */
215 216 int pc_destroy_pages; /* large page destroy's */
216 217 int pc_get_cache; /* get's from cache list */
217 218 int pc_get_free; /* get's from free list */
218 219 int pc_reclaim; /* reclaim's */
219 220 int pc_abortfree; /* abort's of free pages */
220 221 int pc_find_hit; /* find's that find page */
221 222 int pc_find_miss; /* find's that don't find page */
222 223 int pc_destroy_free; /* # of free pages destroyed */
223 224 #define PC_HASH_CNT (4*PAGE_HASHAVELEN)
224 225 int pc_find_hashlen[PC_HASH_CNT+1];
225 226 int pc_addclaim_pages;
226 227 int pc_subclaim_pages;
227 228 int pc_free_replacement_page[2];
228 229 int pc_try_demote_pages[6];
229 230 int pc_demote_pages[2];
230 231 } pagecnt;
231 232
232 233 uint_t hashin_count;
233 234 uint_t hashin_not_held;
234 235 uint_t hashin_already;
235 236
236 237 uint_t hashout_count;
237 238 uint_t hashout_not_held;
238 239
239 240 uint_t page_create_count;
240 241 uint_t page_create_not_enough;
241 242 uint_t page_create_not_enough_again;
242 243 uint_t page_create_zero;
243 244 uint_t page_create_hashout;
244 245 uint_t page_create_page_lock_failed;
245 246 uint_t page_create_trylock_failed;
246 247 uint_t page_create_found_one;
247 248 uint_t page_create_hashin_failed;
248 249 uint_t page_create_dropped_phm;
249 250
250 251 uint_t page_create_new;
251 252 uint_t page_create_exists;
252 253 uint_t page_create_putbacks;
253 254 uint_t page_create_overshoot;
254 255
255 256 uint_t page_reclaim_zero;
256 257 uint_t page_reclaim_zero_locked;
257 258
258 259 uint_t page_rename_exists;
259 260 uint_t page_rename_count;
260 261
261 262 uint_t page_lookup_cnt[20];
262 263 uint_t page_lookup_nowait_cnt[10];
263 264 uint_t page_find_cnt;
264 265 uint_t page_exists_cnt;
265 266 uint_t page_exists_forreal_cnt;
266 267 uint_t page_lookup_dev_cnt;
267 268 uint_t get_cachelist_cnt;
268 269 uint_t page_create_cnt[10];
269 270 uint_t alloc_pages[9];
270 271 uint_t page_exphcontg[19];
271 272 uint_t page_create_large_cnt[10];
272 273
273 274 #endif
274 275
275 276 static inline page_t *
276 277 page_hash_search(ulong_t index, vnode_t *vnode, u_offset_t off)
277 278 {
278 279 uint_t mylen = 0;
279 280 page_t *page;
280 281
281 282 for (page = page_hash[index]; page; page = page->p_hash, mylen++)
282 283 if (page->p_vnode == vnode && page->p_offset == off)
283 284 break;
284 285
285 286 #ifdef VM_STATS
286 287 if (page != NULL)
287 288 pagecnt.pc_find_hit++;
288 289 else
289 290 pagecnt.pc_find_miss++;
290 291
291 292 pagecnt.pc_find_hashlen[MIN(mylen, PC_HASH_CNT)]++;
292 293 #endif
293 294
294 295 return (page);
295 296 }
296 297
297 298
298 299 #ifdef DEBUG
299 300 #define MEMSEG_SEARCH_STATS
300 301 #endif
301 302
302 303 #ifdef MEMSEG_SEARCH_STATS
303 304 struct memseg_stats {
304 305 uint_t nsearch;
305 306 uint_t nlastwon;
306 307 uint_t nhashwon;
307 308 uint_t nnotfound;
308 309 } memseg_stats;
309 310
310 311 #define MEMSEG_STAT_INCR(v) \
311 312 atomic_inc_32(&memseg_stats.v)
312 313 #else
313 314 #define MEMSEG_STAT_INCR(x)
314 315 #endif
315 316
316 317 struct memseg *memsegs; /* list of memory segments */
317 318
318 319 /*
319 320  * /etc/system tunable to control large page allocation heuristic.
320 321 *
321 322 * Setting to LPAP_LOCAL will heavily prefer the local lgroup over remote lgroup
322 323 * for large page allocation requests. If a large page is not readily
323 324  * available on the local freelists, we will go through additional effort
324 325 * to create a large page, potentially moving smaller pages around to coalesce
325 326 * larger pages in the local lgroup.
326 327 * Default value of LPAP_DEFAULT will go to remote freelists if large pages
327 328 * are not readily available in the local lgroup.
328 329 */
329 330 enum lpap {
330 331 LPAP_DEFAULT, /* default large page allocation policy */
331 332 LPAP_LOCAL /* local large page allocation policy */
332 333 };
333 334
334 335 enum lpap lpg_alloc_prefer = LPAP_DEFAULT;
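A hedged illustration of the tunable described above: since LPAP_LOCAL has the value 1 in the enum, an administrator could in principle select the local policy from /etc/system with a line like the one below (shown only to illustrate the mechanism, not as a recommendation).

set lpg_alloc_prefer = 1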
335 336
336 337 static void page_init_mem_config(void);
337 338 static int page_do_hashin(page_t *, vnode_t *, u_offset_t);
338 339 static void page_do_hashout(page_t *);
339 340 static void page_capture_init();
340 341 int page_capture_take_action(page_t *, uint_t, void *);
341 342
342 343 static void page_demote_vp_pages(page_t *);
343 344
344 345
345 346 void
346 347 pcf_init(void)
347 348 {
348 349 if (boot_ncpus != -1) {
349 350 pcf_fanout = boot_ncpus;
350 351 } else {
351 352 pcf_fanout = max_ncpus;
352 353 }
353 354 #ifdef sun4v
354 355 /*
355 356 * Force at least 4 buckets if possible for sun4v.
356 357 */
357 358 pcf_fanout = MAX(pcf_fanout, 4);
358 359 #endif /* sun4v */
359 360
360 361 /*
361 362 * Round up to the nearest power of 2.
362 363 */
363 364 pcf_fanout = MIN(pcf_fanout, MAX_PCF_FANOUT);
364 365 if (!ISP2(pcf_fanout)) {
365 366 pcf_fanout = 1 << highbit(pcf_fanout);
366 367
367 368 if (pcf_fanout > MAX_PCF_FANOUT) {
368 369 pcf_fanout = 1 << (highbit(MAX_PCF_FANOUT) - 1);
369 370 }
370 371 }
371 372 pcf_fanout_mask = pcf_fanout - 1;
372 373 }
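A minimal stand-alone sketch of the rounding pcf_init() performs: round the fanout up to the next power of two, but never above a cap that is itself rounded down to a power of two. The highbit() helper here mirrors the kernel routine of the same name (1-based index of the highest set bit, 0 for 0); the CPU counts are example inputs only.

#include <stdio.h>

static int
highbit(unsigned long v)
{
	int h = 0;

	while (v != 0) {
		h++;
		v >>= 1;
	}
	return (h);
}

static unsigned int
round_fanout(unsigned int ncpus, unsigned int max_fanout)
{
	unsigned int fanout = (ncpus < max_fanout) ? ncpus : max_fanout;

	if ((fanout & (fanout - 1)) != 0) {	/* not a power of two */
		fanout = 1U << highbit(fanout);
		if (fanout > max_fanout)
			fanout = 1U << (highbit(max_fanout) - 1);
	}
	return (fanout);
}

int
main(void)
{
	/* 6 CPUs round up to 8 buckets; with a cap of 48, 48 CPUs get 32 */
	printf("%u %u\n", round_fanout(6, 64), round_fanout(48, 48));
	return (0);
}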
373 374
374 375 /*
375 376 * vm subsystem related initialization
376 377 */
377 378 void
378 379 vm_init(void)
379 380 {
380 381 boolean_t callb_vm_cpr(void *, int);
381 382
382 383 (void) callb_add(callb_vm_cpr, 0, CB_CL_CPR_VM, "vm");
383 384 page_init_mem_config();
384 385 page_retire_init();
385 386 vm_usage_init();
386 387 page_capture_init();
387 388 }
388 389
389 390 /*
390 391 * This function is called at startup and when memory is added or deleted.
391 392 */
392 393 void
393 394 init_pages_pp_maximum()
394 395 {
395 396 static pgcnt_t p_min;
396 397 static pgcnt_t pages_pp_maximum_startup;
397 398 static pgcnt_t avrmem_delta;
398 399 static int init_done;
399 400 static int user_set; /* true if set in /etc/system */
400 401
401 402 if (init_done == 0) {
402 403
403 404 /* If the user specified a value, save it */
404 405 if (pages_pp_maximum != 0) {
405 406 user_set = 1;
406 407 pages_pp_maximum_startup = pages_pp_maximum;
407 408 }
408 409
409 410 /*
410 411 * Setting of pages_pp_maximum is based first time
411 412 * on the value of availrmem just after the start-up
412 413 * allocations. To preserve this relationship at run
413 414 * time, use a delta from availrmem_initial.
414 415 */
415 416 ASSERT(availrmem_initial >= availrmem);
416 417 avrmem_delta = availrmem_initial - availrmem;
417 418
418 419 /* The allowable floor of pages_pp_maximum */
419 420 p_min = tune.t_minarmem + 100;
420 421
421 422 /* Make sure we don't come through here again. */
422 423 init_done = 1;
423 424 }
424 425 /*
425 426 * Determine pages_pp_maximum, the number of currently available
426 427 * pages (availrmem) that can't be `locked'. If not set by
427 428 * the user, we set it to 4% of the currently available memory
428 429 * plus 4MB.
429 430 * But we also insist that it be greater than tune.t_minarmem;
430 431 * otherwise a process could lock down a lot of memory, get swapped
431 432 * out, and never have enough to get swapped back in.
432 433 */
433 434 if (user_set)
434 435 pages_pp_maximum = pages_pp_maximum_startup;
435 436 else
436 437 pages_pp_maximum = ((availrmem_initial - avrmem_delta) / 25)
437 438 + btop(4 * 1024 * 1024);
438 439
439 440 if (pages_pp_maximum <= p_min) {
440 441 pages_pp_maximum = p_min;
441 442 }
442 443 }
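A minimal sketch of the default calculation above, assuming no /etc/system override: 1/25th (4%) of the pages available after startup plus 4MB worth of pages, with the p_min floor applied by the caller. The 16GB memory size and 4K page size are example inputs, not kernel values.

#include <stdio.h>

int
main(void)
{
	unsigned long long pagesize = 4096;
	unsigned long long avail_pages = (16ULL << 30) / pagesize; /* ~16GB */
	unsigned long long pp_max = avail_pages / 25 +
	    (4ULL << 20) / pagesize;			/* plus 4MB of pages */

	printf("default pages_pp_maximum: %llu pages (~%llu MB)\n",
	    pp_max, pp_max * pagesize >> 20);
	return (0);
}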
443 444
444 445 /*
445 446 * In the past, we limited the maximum pages that could be gotten to essentially
446 447 * 1/2 of the total pages on the system. However, this is too conservative for
447 448 * some cases. For example, if we want to host a large virtual machine which
448 449 * needs to use a significant portion of the system's memory. In practice,
449 450 * allowing more than 1/2 of the total pages is fine, but becomes problematic
450 451 * as we approach or exceed 75% of the pages on the system. Thus, we limit the
451 452 * maximum to 23/32 of the total pages, which is ~72%.
452 453 */
453 454 void
454 455 set_max_page_get(pgcnt_t target_total_pages)
455 456 {
456 457 max_page_get = (target_total_pages >> 5) * 23;
457 458 ASSERT3U(max_page_get, >, 0);
458 459 }
459 460
460 461 pgcnt_t
461 462 get_max_page_get()
462 463 {
463 464 return (max_page_get);
464 465 }
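A small sketch of the 23/32 cap described above: the shift-and-multiply keeps the computation in integer arithmetic and works out to roughly 72% of total_pages. The 4M-page total is an example input only.

#include <stdio.h>

int
main(void)
{
	unsigned long long total_pages = 4ULL * 1024 * 1024;	/* example */
	unsigned long long max_page_get = (total_pages >> 5) * 23;

	printf("%llu of %llu pages (%.1f%%)\n", max_page_get, total_pages,
	    100.0 * max_page_get / total_pages);
	return (0);
}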
465 466
466 467 static pgcnt_t pending_delete;
467 468
468 469 /*ARGSUSED*/
469 470 static void
470 471 page_mem_config_post_add(
471 472 void *arg,
472 473 pgcnt_t delta_pages)
473 474 {
474 475 set_max_page_get(total_pages - pending_delete);
475 476 init_pages_pp_maximum();
476 477 }
477 478
478 479 /*ARGSUSED*/
479 480 static int
480 481 page_mem_config_pre_del(
481 482 void *arg,
482 483 pgcnt_t delta_pages)
483 484 {
484 485 pgcnt_t nv;
485 486
486 487 nv = atomic_add_long_nv(&pending_delete, (spgcnt_t)delta_pages);
487 488 set_max_page_get(total_pages - nv);
488 489 return (0);
489 490 }
490 491
491 492 /*ARGSUSED*/
492 493 static void
493 494 page_mem_config_post_del(
494 495 void *arg,
495 496 pgcnt_t delta_pages,
496 497 int cancelled)
497 498 {
498 499 pgcnt_t nv;
499 500
500 501 nv = atomic_add_long_nv(&pending_delete, -(spgcnt_t)delta_pages);
501 502 set_max_page_get(total_pages - nv);
502 503 if (!cancelled)
503 504 init_pages_pp_maximum();
504 505 }
505 506
506 507 static kphysm_setup_vector_t page_mem_config_vec = {
507 508 KPHYSM_SETUP_VECTOR_VERSION,
508 509 page_mem_config_post_add,
509 510 page_mem_config_pre_del,
510 511 page_mem_config_post_del,
511 512 };
512 513
513 514 static void
514 515 page_init_mem_config(void)
515 516 {
516 517 int ret;
517 518
518 519 ret = kphysm_setup_func_register(&page_mem_config_vec, (void *)NULL);
519 520 ASSERT(ret == 0);
520 521 }
521 522
522 523 /*
523 524 * Evenly spread out the PCF counters for large free pages
524 525 */
525 526 static void
526 527 page_free_large_ctr(pgcnt_t npages)
527 528 {
528 529 static struct pcf *p = pcf;
529 530 pgcnt_t lump;
530 531
531 532 freemem += npages;
532 533
533 534 lump = roundup(npages, pcf_fanout) / pcf_fanout;
534 535
535 536 while (npages > 0) {
536 537
537 538 ASSERT(!p->pcf_block);
538 539
539 540 if (lump < npages) {
540 541 p->pcf_count += (uint_t)lump;
541 542 npages -= lump;
542 543 } else {
543 544 p->pcf_count += (uint_t)npages;
544 545 npages = 0;
545 546 }
546 547
547 548 ASSERT(!p->pcf_wait);
548 549
549 550 if (++p > &pcf[pcf_fanout - 1])
550 551 p = pcf;
551 552 }
552 553
553 554 ASSERT(npages == 0);
554 555 }
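A user-space sketch of the distribution loop in page_free_large_ctr(): carve npages into roughly equal lumps of roundup(npages, fanout) / fanout and add them round-robin to the buckets, wrapping at the end of the array. The page count and fanout below are only illustrative.

#include <stdio.h>

#define	FANOUT	4

int
main(void)
{
	unsigned int counts[FANOUT] = { 0 };
	unsigned long npages = 10;
	unsigned long lump = (npages + FANOUT - 1) / FANOUT;	/* roundup */
	int i = 0;

	while (npages > 0) {
		unsigned long add = (lump < npages) ? lump : npages;

		counts[i] += add;
		npages -= add;
		if (++i == FANOUT)
			i = 0;
	}
	for (i = 0; i < FANOUT; i++)
		printf("bucket %d: %u pages\n", i, counts[i]);
	return (0);
}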
555 556
556 557 /*
557 558 * Add a physical chunk of memory to the system free lists during startup.
558 559 * Platform specific startup() allocates the memory for the page structs.
559 560 *
560 561 * num - number of page structures
561 562 * base - page number (pfn) to be associated with the first page.
562 563 *
563 564 * Since we are doing this during startup (ie. single threaded), we will
564 565 * use shortcut routines to avoid any locking overhead while putting all
565 566 * these pages on the freelists.
566 567 *
567 568 * NOTE: Any changes performed to page_free(), must also be performed to
568 569 * add_physmem() since this is how we initialize all page_t's at
569 570 * boot time.
570 571 */
571 572 void
572 573 add_physmem(
573 574 page_t *pp,
574 575 pgcnt_t num,
575 576 pfn_t pnum)
576 577 {
577 578 page_t *root = NULL;
578 579 uint_t szc = page_num_pagesizes() - 1;
579 580 pgcnt_t large = page_get_pagecnt(szc);
580 581 pgcnt_t cnt = 0;
581 582
582 583 TRACE_2(TR_FAC_VM, TR_PAGE_INIT,
583 584 "add_physmem:pp %p num %lu", pp, num);
584 585
585 586 /*
586 587 * Arbitrarily limit the max page_get request
587 588 * to 1/2 of the page structs we have.
588 589 */
589 590 total_pages += num;
590 591 set_max_page_get(total_pages);
591 592
592 593 PLCNT_MODIFY_MAX(pnum, (long)num);
593 594
594 595 /*
595 596 * The physical space for the pages array
596 597 * representing ram pages has already been
597 598 * allocated. Here we initialize each lock
598 599 * in the page structure, and put each on
599 600 * the free list
600 601 */
601 602 for (; num; pp++, pnum++, num--) {
602 603
603 604 /*
604 605 * this needs to fill in the page number
605 606 * and do any other arch specific initialization
606 607 */
607 608 add_physmem_cb(pp, pnum);
608 609
609 610 pp->p_lckcnt = 0;
610 611 pp->p_cowcnt = 0;
611 612 pp->p_slckcnt = 0;
612 613
613 614 /*
614 615 * Initialize the page lock as unlocked, since nobody
615 616 * can see or access this page yet.
616 617 */
617 618 pp->p_selock = 0;
618 619
619 620 /*
620 621 * Initialize IO lock
621 622 */
622 623 page_iolock_init(pp);
623 624
624 625 /*
625 626 * initialize other fields in the page_t
626 627 */
627 628 PP_SETFREE(pp);
628 629 page_clr_all_props(pp);
629 630 PP_SETAGED(pp);
630 631 pp->p_offset = (u_offset_t)-1;
631 632 pp->p_next = pp;
632 633 pp->p_prev = pp;
633 634
634 635 /*
635 636 * Simple case: System doesn't support large pages.
636 637 */
637 638 if (szc == 0) {
638 639 pp->p_szc = 0;
639 640 page_free_at_startup(pp);
640 641 continue;
641 642 }
642 643
643 644 /*
644 645 * Handle unaligned pages, we collect them up onto
645 646 * the root page until we have a full large page.
646 647 */
647 648 if (!IS_P2ALIGNED(pnum, large)) {
648 649
649 650 /*
650 651 * If not in a large page,
651 652 * just free as small page.
652 653 */
653 654 if (root == NULL) {
654 655 pp->p_szc = 0;
655 656 page_free_at_startup(pp);
656 657 continue;
657 658 }
658 659
659 660 /*
660 661 * Link a constituent page into the large page.
661 662 */
662 663 pp->p_szc = szc;
663 664 page_list_concat(&root, &pp);
664 665
665 666 /*
666 667 * When large page is fully formed, free it.
667 668 */
668 669 if (++cnt == large) {
669 670 page_free_large_ctr(cnt);
670 671 page_list_add_pages(root, PG_LIST_ISINIT);
671 672 root = NULL;
672 673 cnt = 0;
673 674 }
674 675 continue;
675 676 }
676 677
677 678 /*
678 679 * At this point we have a page number which
679 680 * is aligned. We assert that we aren't already
680 681 * in a different large page.
681 682 */
682 683 ASSERT(IS_P2ALIGNED(pnum, large));
683 684 ASSERT(root == NULL && cnt == 0);
684 685
685 686 /*
686 687 * If insufficient number of pages left to form
687 688 * a large page, just free the small page.
688 689 */
689 690 if (num < large) {
690 691 pp->p_szc = 0;
691 692 page_free_at_startup(pp);
692 693 continue;
693 694 }
694 695
695 696 /*
696 697 * Otherwise start a new large page.
697 698 */
698 699 pp->p_szc = szc;
699 700 cnt++;
700 701 root = pp;
701 702 }
702 703 ASSERT(root == NULL && cnt == 0);
703 704 }
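A stand-alone sketch of how add_physmem() carves a run of page frames into large pages: frames before the first aligned boundary and any short remainder are freed as small pages, while each fully aligned run of 'large' frames becomes one large page. The starting pfn, count, and large-page size below are example inputs only.

#include <stdio.h>

int
main(void)
{
	unsigned long pfn = 3, num = 40, large = 8;	/* example run */
	unsigned long small = 0, big = 0;

	while (num > 0) {
		if ((pfn % large) != 0 || num < large) {
			small++;	/* can't start a large page here */
			pfn++;
			num--;
		} else {
			big++;		/* aligned with enough frames left */
			pfn += large;
			num -= large;
		}
	}
	printf("%lu small pages, %lu large pages\n", small, big);
	return (0);
}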
704 705
705 706 /*
706 707 * Find a page representing the specified [vp, offset].
707 708 * If we find the page but it is intransit coming in,
708 709 * it will have an "exclusive" lock and we wait for
709 710 * the i/o to complete. A page found on the free list
710 711 * is always reclaimed and then locked. On success, the page
711 712 * is locked, its data is valid and it isn't on the free
712 713 * list, while a NULL is returned if the page doesn't exist.
713 714 */
714 715 page_t *
715 716 page_lookup(vnode_t *vp, u_offset_t off, se_t se)
716 717 {
717 718 return (page_lookup_create(vp, off, se, NULL, NULL, 0));
718 719 }
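For orientation, a hedged sketch of the usual calling pattern (this fragment is illustrative and not taken from the kernel; vp and off stand for a caller-supplied vnode pointer and byte offset):

	page_t *pp;

	pp = page_lookup(vp, off, SE_SHARED);
	if (pp == NULL)
		return (0);	/* no page cached at [vp, off] */

	/* pp is locked SE_SHARED, off the free list, and its data is valid */
	page_unlock(pp);
	return (1);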
719 720
720 721 /*
721 722 * Find a page representing the specified [vp, offset].
722 723 * We either return the one we found or, if passed in,
723 724 * create one with identity of [vp, offset] of the
724 725 * pre-allocated page. If we find existing page but it is
725 726 * intransit coming in, it will have an "exclusive" lock
726 727 * and we wait for the i/o to complete. A page found on
727 728 * the free list is always reclaimed and then locked.
728 729 * On success, the page is locked, its data is valid and
729 730 * it isn't on the free list, while a NULL is returned
730 731 * if the page doesn't exist and newpp is NULL;
731 732 */
732 733 page_t *
733 734 page_lookup_create(
734 735 vnode_t *vp,
735 736 u_offset_t off,
736 737 se_t se,
737 738 page_t *newpp,
738 739 spgcnt_t *nrelocp,
739 740 int flags)
740 741 {
741 742 page_t *pp;
742 743 kmutex_t *phm;
743 744 ulong_t index;
744 745 uint_t hash_locked;
745 746 uint_t es;
746 747
747 748 ASSERT(MUTEX_NOT_HELD(page_vnode_mutex(vp)));
748 749 VM_STAT_ADD(page_lookup_cnt[0]);
749 750 ASSERT(newpp ? PAGE_EXCL(newpp) : 1);
750 751
751 752 /*
752 753 * Acquire the appropriate page hash lock since
753 754 * we have to search the hash list. Pages that
754 755 * hash to this list can't change identity while
755 756 * this lock is held.
756 757 */
757 758 hash_locked = 0;
758 759 index = PAGE_HASH_FUNC(vp, off);
759 760 phm = NULL;
760 761 top:
761 762 pp = page_hash_search(index, vp, off);
762 763 if (pp != NULL) {
763 764 VM_STAT_ADD(page_lookup_cnt[1]);
764 765 es = (newpp != NULL) ? 1 : 0;
765 766 es |= flags;
766 767 if (!hash_locked) {
767 768 VM_STAT_ADD(page_lookup_cnt[2]);
768 769 if (!page_try_reclaim_lock(pp, se, es)) {
769 770 /*
770 771 * On a miss, acquire the phm. Then
771 772 * next time, page_lock() will be called,
772 773 * causing a wait if the page is busy.
773 774 * just looping with page_trylock() would
774 775 * get pretty boring.
775 776 */
776 777 VM_STAT_ADD(page_lookup_cnt[3]);
777 778 phm = PAGE_HASH_MUTEX(index);
778 779 mutex_enter(phm);
779 780 hash_locked = 1;
780 781 goto top;
781 782 }
782 783 } else {
783 784 VM_STAT_ADD(page_lookup_cnt[4]);
784 785 if (!page_lock_es(pp, se, phm, P_RECLAIM, es)) {
785 786 VM_STAT_ADD(page_lookup_cnt[5]);
786 787 goto top;
787 788 }
788 789 }
789 790
790 791 /*
791 792 * Since `pp' is locked it can not change identity now.
792 793 * Reconfirm we locked the correct page.
793 794 *
794 795 * Both the p_vnode and p_offset *must* be cast volatile
795 796 * to force a reload of their values: The page_hash_search
796 797 * function will have stuffed p_vnode and p_offset into
797 798 * registers before calling page_trylock(); another thread,
798 799 * actually holding the hash lock, could have changed the
799 800 * page's identity in memory, but our registers would not
800 801 * be changed, fooling the reconfirmation. If the hash
801 802 * lock was held during the search, the casting would
802 803 * not be needed.
803 804 */
804 805 VM_STAT_ADD(page_lookup_cnt[6]);
805 806 if (((volatile struct vnode *)(pp->p_vnode) != vp) ||
806 807 ((volatile u_offset_t)(pp->p_offset) != off)) {
807 808 VM_STAT_ADD(page_lookup_cnt[7]);
808 809 if (hash_locked) {
809 810 panic("page_lookup_create: lost page %p",
810 811 (void *)pp);
811 812 /*NOTREACHED*/
812 813 }
813 814 page_unlock(pp);
814 815 phm = PAGE_HASH_MUTEX(index);
815 816 mutex_enter(phm);
816 817 hash_locked = 1;
817 818 goto top;
818 819 }
819 820
820 821 /*
821 822 * If page_trylock() was called, then pp may still be on
822 823 * the cachelist (can't be on the free list, it would not
823 824 * have been found in the search). If it is on the
824 825 * cachelist it must be pulled now. To pull the page from
825 826 * the cachelist, it must be exclusively locked.
826 827 *
827 828 * The other big difference between page_trylock() and
828 829 * page_lock(), is that page_lock() will pull the
829 830 * page from whatever free list (the cache list in this
830 831 * case) the page is on. If page_trylock() was used
831 832 * above, then we have to do the reclaim ourselves.
832 833 */
833 834 if ((!hash_locked) && (PP_ISFREE(pp))) {
834 835 ASSERT(PP_ISAGED(pp) == 0);
835 836 VM_STAT_ADD(page_lookup_cnt[8]);
836 837
837 838 /*
838 839  * page_reclaim will ensure that we
839 840 * have this page exclusively
840 841 */
841 842
842 843 if (!page_reclaim(pp, NULL)) {
843 844 /*
844 845 * Page_reclaim dropped whatever lock
845 846 * we held.
846 847 */
847 848 VM_STAT_ADD(page_lookup_cnt[9]);
848 849 phm = PAGE_HASH_MUTEX(index);
849 850 mutex_enter(phm);
850 851 hash_locked = 1;
851 852 goto top;
852 853 } else if (se == SE_SHARED && newpp == NULL) {
853 854 VM_STAT_ADD(page_lookup_cnt[10]);
854 855 page_downgrade(pp);
855 856 }
856 857 }
857 858
858 859 if (hash_locked) {
859 860 mutex_exit(phm);
860 861 }
861 862
862 863 if (newpp != NULL && pp->p_szc < newpp->p_szc &&
863 864 PAGE_EXCL(pp) && nrelocp != NULL) {
864 865 ASSERT(nrelocp != NULL);
865 866 (void) page_relocate(&pp, &newpp, 1, 1, nrelocp,
866 867 NULL);
867 868 if (*nrelocp > 0) {
868 869 VM_STAT_COND_ADD(*nrelocp == 1,
869 870 page_lookup_cnt[11]);
870 871 VM_STAT_COND_ADD(*nrelocp > 1,
871 872 page_lookup_cnt[12]);
872 873 pp = newpp;
873 874 se = SE_EXCL;
874 875 } else {
875 876 if (se == SE_SHARED) {
876 877 page_downgrade(pp);
877 878 }
878 879 VM_STAT_ADD(page_lookup_cnt[13]);
879 880 }
880 881 } else if (newpp != NULL && nrelocp != NULL) {
881 882 if (PAGE_EXCL(pp) && se == SE_SHARED) {
882 883 page_downgrade(pp);
883 884 }
884 885 VM_STAT_COND_ADD(pp->p_szc < newpp->p_szc,
885 886 page_lookup_cnt[14]);
886 887 VM_STAT_COND_ADD(pp->p_szc == newpp->p_szc,
887 888 page_lookup_cnt[15]);
888 889 VM_STAT_COND_ADD(pp->p_szc > newpp->p_szc,
889 890 page_lookup_cnt[16]);
890 891 } else if (newpp != NULL && PAGE_EXCL(pp)) {
891 892 se = SE_EXCL;
892 893 }
893 894 } else if (!hash_locked) {
894 895 VM_STAT_ADD(page_lookup_cnt[17]);
895 896 phm = PAGE_HASH_MUTEX(index);
896 897 mutex_enter(phm);
897 898 hash_locked = 1;
898 899 goto top;
899 900 } else if (newpp != NULL) {
900 901 /*
901 902 * If we have a preallocated page then
902 903 * insert it now and basically behave like
903 904 * page_create.
904 905 */
905 906 VM_STAT_ADD(page_lookup_cnt[18]);
906 907 /*
907 908 * Since we hold the page hash mutex and
908 909 * just searched for this page, page_hashin
909 910 * had better not fail. If it does, that
910 911 * means some thread did not follow the
911 912 * page hash mutex rules. Panic now and
912 913 * get it over with. As usual, go down
913 914 * holding all the locks.
914 915 */
915 916 ASSERT(MUTEX_HELD(phm));
916 917 if (!page_hashin(newpp, vp, off, phm)) {
917 918 ASSERT(MUTEX_HELD(phm));
918 919 panic("page_lookup_create: hashin failed %p %p %llx %p",
919 920 (void *)newpp, (void *)vp, off, (void *)phm);
920 921 /*NOTREACHED*/
921 922 }
922 923 ASSERT(MUTEX_HELD(phm));
923 924 mutex_exit(phm);
924 925 phm = NULL;
925 926 page_set_props(newpp, P_REF);
926 927 page_io_lock(newpp);
927 928 pp = newpp;
928 929 se = SE_EXCL;
929 930 } else {
930 931 VM_STAT_ADD(page_lookup_cnt[19]);
931 932 mutex_exit(phm);
932 933 }
933 934
934 935 ASSERT(pp ? PAGE_LOCKED_SE(pp, se) : 1);
935 936
936 937 ASSERT(pp ? ((PP_ISFREE(pp) == 0) && (PP_ISAGED(pp) == 0)) : 1);
937 938
938 939 return (pp);
939 940 }
940 941
941 942 /*
942 943 * Search the hash list for the page representing the
943 944 * specified [vp, offset] and return it locked. Skip
944 945 * free pages and pages that cannot be locked as requested.
945 946 * Used while attempting to kluster pages.
946 947 */
947 948 page_t *
948 949 page_lookup_nowait(vnode_t *vp, u_offset_t off, se_t se)
949 950 {
950 951 page_t *pp;
951 952 kmutex_t *phm;
952 953 ulong_t index;
953 954 uint_t locked;
954 955
955 956 ASSERT(MUTEX_NOT_HELD(page_vnode_mutex(vp)));
956 957 VM_STAT_ADD(page_lookup_nowait_cnt[0]);
957 958
958 959 index = PAGE_HASH_FUNC(vp, off);
959 960 pp = page_hash_search(index, vp, off);
960 961 locked = 0;
961 962 if (pp == NULL) {
962 963 top:
963 964 VM_STAT_ADD(page_lookup_nowait_cnt[1]);
964 965 locked = 1;
965 966 phm = PAGE_HASH_MUTEX(index);
966 967 mutex_enter(phm);
967 968 pp = page_hash_search(index, vp, off);
968 969 }
969 970
970 971 if (pp == NULL || PP_ISFREE(pp)) {
971 972 VM_STAT_ADD(page_lookup_nowait_cnt[2]);
972 973 pp = NULL;
973 974 } else {
974 975 if (!page_trylock(pp, se)) {
975 976 VM_STAT_ADD(page_lookup_nowait_cnt[3]);
976 977 pp = NULL;
977 978 } else {
978 979 VM_STAT_ADD(page_lookup_nowait_cnt[4]);
979 980 /*
980 981 * See the comment in page_lookup()
981 982 */
982 983 if (((volatile struct vnode *)(pp->p_vnode) != vp) ||
983 984 ((u_offset_t)(pp->p_offset) != off)) {
984 985 VM_STAT_ADD(page_lookup_nowait_cnt[5]);
985 986 if (locked) {
986 987 panic("page_lookup_nowait %p",
987 988 (void *)pp);
988 989 /*NOTREACHED*/
989 990 }
990 991 page_unlock(pp);
991 992 goto top;
992 993 }
993 994 if (PP_ISFREE(pp)) {
994 995 VM_STAT_ADD(page_lookup_nowait_cnt[6]);
995 996 page_unlock(pp);
996 997 pp = NULL;
997 998 }
998 999 }
999 1000 }
1000 1001 if (locked) {
1001 1002 VM_STAT_ADD(page_lookup_nowait_cnt[7]);
1002 1003 mutex_exit(phm);
1003 1004 }
1004 1005
1005 1006 ASSERT(pp ? PAGE_LOCKED_SE(pp, se) : 1);
1006 1007
1007 1008 return (pp);
1008 1009 }
1009 1010
1010 1011 /*
1011 1012 * Search the hash list for a page with the specified [vp, off]
1012 1013 * that is known to exist and is already locked. This routine
1013 1014 * is typically used by segment SOFTUNLOCK routines.
1014 1015 */
1015 1016 page_t *
1016 1017 page_find(vnode_t *vp, u_offset_t off)
1017 1018 {
1018 1019 page_t *pp;
1019 1020 kmutex_t *phm;
1020 1021 ulong_t index;
1021 1022
1022 1023 ASSERT(MUTEX_NOT_HELD(page_vnode_mutex(vp)));
1023 1024 VM_STAT_ADD(page_find_cnt);
1024 1025
1025 1026 index = PAGE_HASH_FUNC(vp, off);
1026 1027 phm = PAGE_HASH_MUTEX(index);
1027 1028
1028 1029 mutex_enter(phm);
1029 1030 pp = page_hash_search(index, vp, off);
1030 1031 mutex_exit(phm);
1031 1032
1032 1033 ASSERT(pp == NULL || PAGE_LOCKED(pp) || panicstr);
1033 1034 return (pp);
1034 1035 }
1035 1036
1036 1037 /*
1037 1038 * Determine whether a page with the specified [vp, off]
1038 1039 * currently exists in the system. Obviously this should
1039 1040 * only be considered as a hint since nothing prevents the
1040 1041 * page from disappearing or appearing immediately after
1041 1042 * the return from this routine. Subsequently, we don't
1042 1043 * even bother to lock the list.
1043 1044 */
1044 1045 page_t *
1045 1046 page_exists(vnode_t *vp, u_offset_t off)
1046 1047 {
1047 1048 ulong_t index;
1048 1049
1049 1050 ASSERT(MUTEX_NOT_HELD(page_vnode_mutex(vp)));
1050 1051 VM_STAT_ADD(page_exists_cnt);
1051 1052
1052 1053 index = PAGE_HASH_FUNC(vp, off);
1053 1054
1054 1055 return (page_hash_search(index, vp, off));
1055 1056 }
1056 1057
1057 1058 /*
1058 1059 * Determine if physically contiguous pages exist for [vp, off] - [vp, off +
1059 1060  * page_size(szc)) range. If they exist and ppa is not NULL, fill the ppa array
1060 1061 * with these pages locked SHARED. If necessary reclaim pages from
1061 1062 * freelist. Return 1 if contiguous pages exist and 0 otherwise.
1062 1063 *
1063 1064 * If we fail to lock pages still return 1 if pages exist and contiguous.
1064 1065 * But in this case return value is just a hint. ppa array won't be filled.
1065 1066 * Caller should initialize ppa[0] as NULL to distinguish return value.
1066 1067 *
1067 1068 * Returns 0 if pages don't exist or not physically contiguous.
1068 1069 *
1069 1070 * This routine doesn't work for anonymous(swapfs) pages.
1070 1071 */
1071 1072 int
1072 1073 page_exists_physcontig(vnode_t *vp, u_offset_t off, uint_t szc, page_t *ppa[])
1073 1074 {
1074 1075 pgcnt_t pages;
1075 1076 pfn_t pfn;
1076 1077 page_t *rootpp;
1077 1078 pgcnt_t i;
1078 1079 pgcnt_t j;
1079 1080 u_offset_t save_off = off;
1080 1081 ulong_t index;
1081 1082 kmutex_t *phm;
1082 1083 page_t *pp;
1083 1084 uint_t pszc;
1084 1085 int loopcnt = 0;
1085 1086
1086 1087 ASSERT(szc != 0);
1087 1088 ASSERT(vp != NULL);
1088 1089 ASSERT(!IS_SWAPFSVP(vp));
1089 1090 ASSERT(!VN_ISKAS(vp));
1090 1091
1091 1092 again:
1092 1093 if (++loopcnt > 3) {
1093 1094 VM_STAT_ADD(page_exphcontg[0]);
1094 1095 return (0);
1095 1096 }
1096 1097
1097 1098 index = PAGE_HASH_FUNC(vp, off);
1098 1099 phm = PAGE_HASH_MUTEX(index);
1099 1100
1100 1101 mutex_enter(phm);
1101 1102 pp = page_hash_search(index, vp, off);
1102 1103 mutex_exit(phm);
1103 1104
1104 1105 VM_STAT_ADD(page_exphcontg[1]);
1105 1106
1106 1107 if (pp == NULL) {
1107 1108 VM_STAT_ADD(page_exphcontg[2]);
1108 1109 return (0);
1109 1110 }
1110 1111
1111 1112 pages = page_get_pagecnt(szc);
1112 1113 rootpp = pp;
1113 1114 pfn = rootpp->p_pagenum;
1114 1115
1115 1116 if ((pszc = pp->p_szc) >= szc && ppa != NULL) {
1116 1117 VM_STAT_ADD(page_exphcontg[3]);
1117 1118 if (!page_trylock(pp, SE_SHARED)) {
1118 1119 VM_STAT_ADD(page_exphcontg[4]);
1119 1120 return (1);
1120 1121 }
1121 1122 /*
1122 1123 * Also check whether p_pagenum was modified by DR.
1123 1124 */
1124 1125 if (pp->p_szc != pszc || pp->p_vnode != vp ||
1125 1126 pp->p_offset != off || pp->p_pagenum != pfn) {
1126 1127 VM_STAT_ADD(page_exphcontg[5]);
1127 1128 page_unlock(pp);
1128 1129 off = save_off;
1129 1130 goto again;
1130 1131 }
1131 1132 /*
1132 1133  * Since szc was non-zero and the vnode and offset matched after we
1133 1134  * locked the page, it can't become free on us.
1134 1135 */
1135 1136 ASSERT(!PP_ISFREE(pp));
1136 1137 if (!IS_P2ALIGNED(pfn, pages)) {
1137 1138 page_unlock(pp);
1138 1139 return (0);
1139 1140 }
1140 1141 ppa[0] = pp;
1141 1142 pp++;
1142 1143 off += PAGESIZE;
1143 1144 pfn++;
1144 1145 for (i = 1; i < pages; i++, pp++, off += PAGESIZE, pfn++) {
1145 1146 if (!page_trylock(pp, SE_SHARED)) {
1146 1147 VM_STAT_ADD(page_exphcontg[6]);
1147 1148 pp--;
1148 1149 while (i-- > 0) {
1149 1150 page_unlock(pp);
1150 1151 pp--;
1151 1152 }
1152 1153 ppa[0] = NULL;
1153 1154 return (1);
1154 1155 }
1155 1156 if (pp->p_szc != pszc) {
1156 1157 VM_STAT_ADD(page_exphcontg[7]);
1157 1158 page_unlock(pp);
1158 1159 pp--;
1159 1160 while (i-- > 0) {
1160 1161 page_unlock(pp);
1161 1162 pp--;
1162 1163 }
1163 1164 ppa[0] = NULL;
1164 1165 off = save_off;
1165 1166 goto again;
1166 1167 }
1167 1168 /*
1168 1169  * The szc is the same as for the previously locked pages
1169 1170  * with the right identity. Since this page had the correct
1170 1171  * szc after we locked it, it can't get freed or destroyed
1171 1172 * and therefore must have the expected identity.
1172 1173 */
1173 1174 ASSERT(!PP_ISFREE(pp));
1174 1175 if (pp->p_vnode != vp ||
1175 1176 pp->p_offset != off) {
1176 1177 panic("page_exists_physcontig: "
1177 1178 "large page identity doesn't match");
1178 1179 }
1179 1180 ppa[i] = pp;
1180 1181 ASSERT(pp->p_pagenum == pfn);
1181 1182 }
1182 1183 VM_STAT_ADD(page_exphcontg[8]);
1183 1184 ppa[pages] = NULL;
1184 1185 return (1);
1185 1186 } else if (pszc >= szc) {
1186 1187 VM_STAT_ADD(page_exphcontg[9]);
1187 1188 if (!IS_P2ALIGNED(pfn, pages)) {
1188 1189 return (0);
1189 1190 }
1190 1191 return (1);
1191 1192 }
1192 1193
1193 1194 if (!IS_P2ALIGNED(pfn, pages)) {
1194 1195 VM_STAT_ADD(page_exphcontg[10]);
1195 1196 return (0);
1196 1197 }
1197 1198
1198 1199 if (page_numtomemseg_nolock(pfn) !=
1199 1200 page_numtomemseg_nolock(pfn + pages - 1)) {
1200 1201 VM_STAT_ADD(page_exphcontg[11]);
1201 1202 return (0);
1202 1203 }
1203 1204
1204 1205 /*
1205 1206 * We loop up 4 times across pages to promote page size.
1206 1207 * We're extra cautious to promote page size atomically with respect
1207 1208 * to everybody else. But we can probably optimize into 1 loop if
1208 1209 * this becomes an issue.
1209 1210 */
1210 1211
1211 1212 for (i = 0; i < pages; i++, pp++, off += PAGESIZE, pfn++) {
1212 1213 if (!page_trylock(pp, SE_EXCL)) {
1213 1214 VM_STAT_ADD(page_exphcontg[12]);
1214 1215 break;
1215 1216 }
1216 1217 /*
1217 1218 * Check whether p_pagenum was modified by DR.
1218 1219 */
1219 1220 if (pp->p_pagenum != pfn) {
1220 1221 page_unlock(pp);
1221 1222 break;
1222 1223 }
1223 1224 if (pp->p_vnode != vp ||
1224 1225 pp->p_offset != off) {
1225 1226 VM_STAT_ADD(page_exphcontg[13]);
1226 1227 page_unlock(pp);
1227 1228 break;
1228 1229 }
1229 1230 if (pp->p_szc >= szc) {
1230 1231 ASSERT(i == 0);
1231 1232 page_unlock(pp);
1232 1233 off = save_off;
1233 1234 goto again;
1234 1235 }
1235 1236 }
1236 1237
1237 1238 if (i != pages) {
1238 1239 VM_STAT_ADD(page_exphcontg[14]);
1239 1240 --pp;
1240 1241 while (i-- > 0) {
1241 1242 page_unlock(pp);
1242 1243 --pp;
1243 1244 }
1244 1245 return (0);
1245 1246 }
1246 1247
1247 1248 pp = rootpp;
1248 1249 for (i = 0; i < pages; i++, pp++) {
1249 1250 if (PP_ISFREE(pp)) {
1250 1251 VM_STAT_ADD(page_exphcontg[15]);
1251 1252 ASSERT(!PP_ISAGED(pp));
1252 1253 ASSERT(pp->p_szc == 0);
1253 1254 if (!page_reclaim(pp, NULL)) {
1254 1255 break;
1255 1256 }
1256 1257 } else {
1257 1258 ASSERT(pp->p_szc < szc);
1258 1259 VM_STAT_ADD(page_exphcontg[16]);
1259 1260 (void) hat_pageunload(pp, HAT_FORCE_PGUNLOAD);
1260 1261 }
1261 1262 }
1262 1263 if (i < pages) {
1263 1264 VM_STAT_ADD(page_exphcontg[17]);
1264 1265 /*
1265 1266 * page_reclaim failed because we were out of memory.
1266 1267 * drop the rest of the locks and return because this page
1267 1268 * must be already reallocated anyway.
1268 1269 */
1269 1270 pp = rootpp;
1270 1271 for (j = 0; j < pages; j++, pp++) {
1271 1272 if (j != i) {
1272 1273 page_unlock(pp);
1273 1274 }
1274 1275 }
1275 1276 return (0);
1276 1277 }
1277 1278
1278 1279 off = save_off;
1279 1280 pp = rootpp;
1280 1281 for (i = 0; i < pages; i++, pp++, off += PAGESIZE) {
1281 1282 ASSERT(PAGE_EXCL(pp));
1282 1283 ASSERT(!PP_ISFREE(pp));
1283 1284 ASSERT(!hat_page_is_mapped(pp));
1284 1285 ASSERT(pp->p_vnode == vp);
1285 1286 ASSERT(pp->p_offset == off);
1286 1287 pp->p_szc = szc;
1287 1288 }
1288 1289 pp = rootpp;
1289 1290 for (i = 0; i < pages; i++, pp++) {
1290 1291 if (ppa == NULL) {
1291 1292 page_unlock(pp);
1292 1293 } else {
1293 1294 ppa[i] = pp;
1294 1295 page_downgrade(ppa[i]);
1295 1296 }
1296 1297 }
1297 1298 if (ppa != NULL) {
1298 1299 ppa[pages] = NULL;
1299 1300 }
1300 1301 VM_STAT_ADD(page_exphcontg[18]);
1301 1302 ASSERT(vp->v_pages != NULL);
1302 1303 return (1);
1303 1304 }
1304 1305
1305 1306 /*
1306 1307 * Determine whether a page with the specified [vp, off]
1307 1308 * currently exists in the system and if so return its
1308 1309 * size code. Obviously this should only be considered as
1309 1310 * a hint since nothing prevents the page from disappearing
1310 1311 * or appearing immediately after the return from this routine.
1311 1312 */
1312 1313 int
1313 1314 page_exists_forreal(vnode_t *vp, u_offset_t off, uint_t *szc)
1314 1315 {
1315 1316 page_t *pp;
1316 1317 kmutex_t *phm;
1317 1318 ulong_t index;
1318 1319 int rc = 0;
1319 1320
1320 1321 ASSERT(MUTEX_NOT_HELD(page_vnode_mutex(vp)));
1321 1322 ASSERT(szc != NULL);
1322 1323 VM_STAT_ADD(page_exists_forreal_cnt);
1323 1324
1324 1325 index = PAGE_HASH_FUNC(vp, off);
1325 1326 phm = PAGE_HASH_MUTEX(index);
1326 1327
1327 1328 mutex_enter(phm);
1328 1329 pp = page_hash_search(index, vp, off);
1329 1330 if (pp != NULL) {
1330 1331 *szc = pp->p_szc;
1331 1332 rc = 1;
1332 1333 }
1333 1334 mutex_exit(phm);
1334 1335 return (rc);
1335 1336 }
1336 1337
1337 1338 /* wakeup threads waiting for pages in page_create_get_something() */
1338 1339 void
1339 1340 wakeup_pcgs(void)
1340 1341 {
1341 1342 if (!CV_HAS_WAITERS(&pcgs_cv))
1342 1343 return;
1343 1344 cv_broadcast(&pcgs_cv);
1344 1345 }
1345 1346
1346 1347 /*
1347 1348 * 'freemem' is used all over the kernel as an indication of how many
1348 1349 * pages are free (either on the cache list or on the free page list)
1349 1350 * in the system. In very few places is a really accurate 'freemem'
1350 1351  * needed. To avoid contention on the lock protecting the
1351 1352 * single freemem, it was spread out into NCPU buckets. Set_freemem
1352 1353 * sets freemem to the total of all NCPU buckets. It is called from
1353 1354 * clock() on each TICK.
1354 1355 */
1355 1356 void
1356 1357 set_freemem(void)
1357 1358 {
1358 1359 struct pcf *p;
1359 1360 ulong_t t;
1360 1361 uint_t i;
1361 1362
1362 1363 t = 0;
1363 1364 p = pcf;
1364 1365 for (i = 0; i < pcf_fanout; i++) {
1365 1366 t += p->pcf_count;
1366 1367 p++;
1367 1368 }
1368 1369 freemem = t;
1369 1370
1370 1371 /*
1371 1372 * Don't worry about grabbing mutex. It's not that
1372 1373 * critical if we miss a tick or two. This is
1373 1374 * where we wakeup possible delayers in
1374 1375 * page_create_get_something().
1375 1376 */
1376 1377 wakeup_pcgs();
1377 1378 }
1378 1379
1379 1380 ulong_t
1380 1381 get_freemem()
1381 1382 {
1382 1383 struct pcf *p;
1383 1384 ulong_t t;
1384 1385 uint_t i;
1385 1386
1386 1387 t = 0;
1387 1388 p = pcf;
1388 1389 for (i = 0; i < pcf_fanout; i++) {
1389 1390 t += p->pcf_count;
1390 1391 p++;
1391 1392 }
1392 1393 /*
1393 1394 * We just calculated it, might as well set it.
1394 1395 */
1395 1396 freemem = t;
1396 1397 return (t);
1397 1398 }
1398 1399
1399 1400 /*
1400 1401 * Acquire all of the page cache & free (pcf) locks.
1401 1402 */
1402 1403 void
1403 1404 pcf_acquire_all()
1404 1405 {
1405 1406 struct pcf *p;
1406 1407 uint_t i;
1407 1408
1408 1409 p = pcf;
1409 1410 for (i = 0; i < pcf_fanout; i++) {
1410 1411 mutex_enter(&p->pcf_lock);
1411 1412 p++;
1412 1413 }
1413 1414 }
1414 1415
1415 1416 /*
1416 1417 * Release all the pcf_locks.
1417 1418 */
1418 1419 void
1419 1420 pcf_release_all()
1420 1421 {
1421 1422 struct pcf *p;
1422 1423 uint_t i;
1423 1424
1424 1425 p = pcf;
1425 1426 for (i = 0; i < pcf_fanout; i++) {
1426 1427 mutex_exit(&p->pcf_lock);
1427 1428 p++;
1428 1429 }
1429 1430 }
1430 1431
1431 1432 /*
1432 1433 * Inform the VM system that we need some pages freed up.
1433 1434 * Calls must be symmetric, e.g.:
1434 1435 *
1435 1436 * page_needfree(100);
1436 1437 * wait a bit;
1437 1438 * page_needfree(-100);
1438 1439 */
1439 1440 void
1440 1441 page_needfree(spgcnt_t npages)
1441 1442 {
1442 1443 mutex_enter(&new_freemem_lock);
1443 1444 needfree += npages;
1444 1445 mutex_exit(&new_freemem_lock);
1445 1446 }
1446 1447
1447 1448 /*
1448 1449 * Throttle for page_create(): try to prevent freemem from dropping
1449 1450 * below throttlefree. We can't provide a 100% guarantee because
1450 1451 * KM_NOSLEEP allocations, page_reclaim(), and various other things
1451 1452 * nibble away at the freelist. However, we can block all PG_WAIT
1452 1453 * allocations until memory becomes available. The motivation is
1453 1454 * that several things can fall apart when there's no free memory:
1454 1455 *
1455 1456 * (1) If pageout() needs memory to push a page, the system deadlocks.
1456 1457 *
1457 1458 * (2) By (broken) specification, timeout(9F) can neither fail nor
1458 1459 * block, so it has no choice but to panic the system if it
1459 1460 * cannot allocate a callout structure.
1460 1461 *
1461 1462 * (3) Like timeout(), ddi_set_callback() cannot fail and cannot block;
1462 1463 * it panics if it cannot allocate a callback structure.
1463 1464 *
1464 1465 * (4) Untold numbers of third-party drivers have not yet been hardened
1465 1466 * against KM_NOSLEEP and/or allocb() failures; they simply assume
1466 1467 * success and panic the system with a data fault on failure.
1467 1468 * (The long-term solution to this particular problem is to ship
1468 1469 * hostile fault-injecting DEBUG kernels with the DDK.)
1469 1470 *
1470 1471 * It is theoretically impossible to guarantee success of non-blocking
1471 1472 * allocations, but in practice, this throttle is very hard to break.
1472 1473 */
1473 1474 static int
1474 1475 page_create_throttle(pgcnt_t npages, int flags)
1475 1476 {
1476 1477 ulong_t fm;
1477 1478 uint_t i;
1478 1479 pgcnt_t tf; /* effective value of throttlefree */
1479 1480
1480 1481 atomic_inc_64(&n_throttle);
1481 1482
1482 1483 /*
1483 1484 * Normal priority allocations.
1484 1485 */
1485 1486 if ((flags & (PG_WAIT | PG_NORMALPRI)) == PG_NORMALPRI) {
1486 1487 ASSERT(!(flags & (PG_PANIC | PG_PUSHPAGE)));
1487 1488 return (freemem >= npages + throttlefree);
1488 1489 }
1489 1490
1490 1491 /*
1491 1492 * Never deny pages when:
1492 1493 * - it's a thread that cannot block [NOMEMWAIT()]
1493 1494 * - the allocation cannot block and must not fail
1494 1495 * - the allocation cannot block and is pageout dispensated
1495 1496 */
1496 1497 if (NOMEMWAIT() ||
1497 1498 ((flags & (PG_WAIT | PG_PANIC)) == PG_PANIC) ||
1498 1499 ((flags & (PG_WAIT | PG_PUSHPAGE)) == PG_PUSHPAGE))
1499 1500 return (1);
1500 1501
1501 1502 /*
1502 1503 * If the allocation can't block, we look favorably upon it
1503 1504 * unless we're below pageout_reserve. In that case we fail
1504 1505 * the allocation because we want to make sure there are a few
1505 1506 * pages available for pageout.
1506 1507 */
1507 1508 if ((flags & PG_WAIT) == 0)
1508 1509 return (freemem >= npages + pageout_reserve);
1509 1510
1510 1511 /* Calculate the effective throttlefree value */
1511 1512 tf = throttlefree -
1512 1513 ((flags & PG_PUSHPAGE) ? pageout_reserve : 0);
1513 1514
1514 1515 WAKE_PAGEOUT_SCANNER();
1515 1516
1516 1517 for (;;) {
1517 1518 fm = 0;
1518 1519 pcf_acquire_all();
1519 1520 mutex_enter(&new_freemem_lock);
1520 1521 for (i = 0; i < pcf_fanout; i++) {
1521 1522 fm += pcf[i].pcf_count;
1522 1523 pcf[i].pcf_wait++;
1523 1524 mutex_exit(&pcf[i].pcf_lock);
1524 1525 }
1525 1526 freemem = fm;
1526 1527 if (freemem >= npages + tf) {
1527 1528 mutex_exit(&new_freemem_lock);
1528 1529 break;
1529 1530 }
1530 1531 needfree += npages;
1531 1532 freemem_wait++;
1532 1533 cv_wait(&freemem_cv, &new_freemem_lock);
1533 1534 freemem_wait--;
1534 1535 needfree -= npages;
1535 1536 mutex_exit(&new_freemem_lock);
1536 1537 }
1537 1538 return (1);
1538 1539 }
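The policy above can be summarized as a pure decision function. The sketch below is a stand-alone restatement of that ladder; it omits the NOMEMWAIT() check and the waiting loop, and the F_* flags are local names for this example, not the kernel's PG_* values.

#include <stdio.h>

#define	F_WAIT		0x01
#define	F_PANIC		0x02
#define	F_PUSHPAGE	0x04
#define	F_NORMALPRI	0x08

static int
may_allocate(unsigned long freemem, unsigned long npages, int flags,
    unsigned long throttlefree, unsigned long pageout_reserve)
{
	/* normal priority, non-blocking request */
	if ((flags & (F_WAIT | F_NORMALPRI)) == F_NORMALPRI)
		return (freemem >= npages + throttlefree);

	/* must-not-fail and pageout-dispensated requests are never denied */
	if ((flags & (F_WAIT | F_PANIC)) == F_PANIC ||
	    (flags & (F_WAIT | F_PUSHPAGE)) == F_PUSHPAGE)
		return (1);

	/* other non-blocking requests only need to stay above the reserve */
	if ((flags & F_WAIT) == 0)
		return (freemem >= npages + pageout_reserve);

	/* blocking requests wait until freemem reaches the effective limit */
	return (freemem >= npages + throttlefree -
	    ((flags & F_PUSHPAGE) ? pageout_reserve : 0));
}

int
main(void)
{
	printf("%d\n", may_allocate(1000, 10, F_NORMALPRI, 500, 100));
	return (0);
}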
1539 1540
1540 1541 /*
1541 1542 * page_create_wait() is called to either coalesce pages from the
1542 1543 * different pcf buckets or to wait because there simply are not
1543 1544 * enough pages to satisfy the caller's request.
1544 1545 *
1545 1546 * Sadly, this is called from platform/vm/vm_machdep.c
1546 1547 */
1547 1548 int
1548 1549 page_create_wait(pgcnt_t npages, uint_t flags)
1549 1550 {
1550 1551 pgcnt_t total;
1551 1552 uint_t i;
1552 1553 struct pcf *p;
1553 1554
1554 1555 /*
1555 1556 * Wait until there are enough free pages to satisfy our
1556 1557 * entire request.
1557 1558 * We set needfree += npages before prodding pageout, to make sure
1558 1559 * it does real work when npages > lotsfree > freemem.
1559 1560 */
1560 1561 VM_STAT_ADD(page_create_not_enough);
1561 1562
1562 1563 ASSERT(!kcage_on ? !(flags & PG_NORELOC) : 1);
1563 1564 checkagain:
1564 1565 if ((flags & PG_NORELOC) &&
1565 1566 kcage_freemem < kcage_throttlefree + npages)
1566 1567 (void) kcage_create_throttle(npages, flags);
1567 1568
1568 1569 if (freemem < npages + throttlefree)
1569 1570 if (!page_create_throttle(npages, flags))
1570 1571 return (0);
1571 1572
1572 1573 if (pcf_decrement_bucket(npages) ||
1573 1574 pcf_decrement_multiple(&total, npages, 0))
1574 1575 return (1);
1575 1576
1576 1577 /*
1577 1578 * All of the pcf locks are held, there are not enough pages
1578 1579 * to satisfy the request (npages < total).
1579 1580 * Be sure to acquire the new_freemem_lock before dropping
1580 1581 * the pcf locks. This prevents dropping wakeups in page_free().
1581 1582 * The order is always pcf_lock then new_freemem_lock.
1582 1583 *
1583 1584 * Since we hold all the pcf locks, it is a good time to set freemem.
1584 1585 *
1585 1586 * If the caller does not want to wait, return now.
1586 1587 * Else turn the pageout daemon loose to find something
1587 1588 * and wait till it does.
1588 1589 *
1589 1590 */
1590 1591 freemem = total;
1591 1592
1592 1593 if ((flags & PG_WAIT) == 0) {
1593 1594 pcf_release_all();
1594 1595
1595 1596 TRACE_2(TR_FAC_VM, TR_PAGE_CREATE_NOMEM,
1596 1597 "page_create_nomem:npages %ld freemem %ld", npages, freemem);
1597 1598 return (0);
1598 1599 }
1599 1600
1600 1601 ASSERT(proc_pageout != NULL);
1601 1602 WAKE_PAGEOUT_SCANNER();
1602 1603
1603 1604 TRACE_2(TR_FAC_VM, TR_PAGE_CREATE_SLEEP_START,
1604 1605 "page_create_sleep_start: freemem %ld needfree %ld",
1605 1606 freemem, needfree);
1606 1607
1607 1608 /*
1608 1609 * We are going to wait.
1609 1610 * We currently hold all of the pcf_locks,
1610 1611 * get the new_freemem_lock (it protects freemem_wait),
1611 1612 * before dropping the pcf_locks.
1612 1613 */
1613 1614 mutex_enter(&new_freemem_lock);
1614 1615
1615 1616 p = pcf;
1616 1617 for (i = 0; i < pcf_fanout; i++) {
1617 1618 p->pcf_wait++;
1618 1619 mutex_exit(&p->pcf_lock);
1619 1620 p++;
1620 1621 }
1621 1622
1622 1623 needfree += npages;
1623 1624 freemem_wait++;
1624 1625
1625 1626 cv_wait(&freemem_cv, &new_freemem_lock);
1626 1627
1627 1628 freemem_wait--;
1628 1629 needfree -= npages;
1629 1630
1630 1631 mutex_exit(&new_freemem_lock);
1631 1632
1632 1633 TRACE_2(TR_FAC_VM, TR_PAGE_CREATE_SLEEP_END,
1633 1634 "page_create_sleep_end: freemem %ld needfree %ld",
1634 1635 freemem, needfree);
1635 1636
1636 1637 VM_STAT_ADD(page_create_not_enough_again);
1637 1638 goto checkagain;
1638 1639 }
1639 1640 /*
1640 1641 * A routine to do the opposite of page_create_wait().
1641 1642 */
1642 1643 void
1643 1644 page_create_putback(spgcnt_t npages)
1644 1645 {
1645 1646 struct pcf *p;
1646 1647 pgcnt_t lump;
1647 1648 uint_t *which;
1648 1649
1649 1650 /*
1650 1651 * When a contiguous lump is broken up, we have to
1651 1652  * deal with lots of pages (min 64) so let's spread
1652 1653 * the wealth around.
1653 1654 */
1654 1655 lump = roundup(npages, pcf_fanout) / pcf_fanout;
1655 1656 freemem += npages;
1656 1657
1657 1658 for (p = pcf; (npages > 0) && (p < &pcf[pcf_fanout]); p++) {
1658 1659 which = &p->pcf_count;
1659 1660
1660 1661 mutex_enter(&p->pcf_lock);
1661 1662
1662 1663 if (p->pcf_block) {
1663 1664 which = &p->pcf_reserve;
1664 1665 }
1665 1666
1666 1667 if (lump < npages) {
1667 1668 *which += (uint_t)lump;
1668 1669 npages -= lump;
1669 1670 } else {
1670 1671 *which += (uint_t)npages;
1671 1672 npages = 0;
1672 1673 }
1673 1674
1674 1675 if (p->pcf_wait) {
1675 1676 mutex_enter(&new_freemem_lock);
1676 1677 /*
1677 1678 * Check to see if some other thread
1678 1679 * is actually waiting. Another bucket
1679 1680 * may have woken it up by now. If there
1680 1681 * are no waiters, then set our pcf_wait
1681 1682 * count to zero to avoid coming in here
1682 1683 * next time.
1683 1684 */
1684 1685 if (freemem_wait) {
1685 1686 if (npages > 1) {
1686 1687 cv_broadcast(&freemem_cv);
1687 1688 } else {
1688 1689 cv_signal(&freemem_cv);
1689 1690 }
1690 1691 p->pcf_wait--;
1691 1692 } else {
1692 1693 p->pcf_wait = 0;
1693 1694 }
1694 1695 mutex_exit(&new_freemem_lock);
1695 1696 }
1696 1697 mutex_exit(&p->pcf_lock);
1697 1698 }
1698 1699 ASSERT(npages == 0);
1699 1700 }
1700 1701
1701 1702 /*
1702 1703 * A helper routine for page_create_get_something.
1703 1704  * The indenting got too deep down there.
1704 1705 * Unblock the pcf counters. Any pages freed after
1705 1706 * pcf_block got set are moved to pcf_count and
1706 1707 * wakeups (cv_broadcast() or cv_signal()) are done as needed.
1707 1708 */
1708 1709 static void
1709 1710 pcgs_unblock(void)
1710 1711 {
1711 1712 int i;
1712 1713 struct pcf *p;
1713 1714
1714 1715 /* Update freemem while we're here. */
1715 1716 freemem = 0;
1716 1717 p = pcf;
1717 1718 for (i = 0; i < pcf_fanout; i++) {
1718 1719 mutex_enter(&p->pcf_lock);
1719 1720 ASSERT(p->pcf_count == 0);
1720 1721 p->pcf_count = p->pcf_reserve;
1721 1722 p->pcf_block = 0;
1722 1723 freemem += p->pcf_count;
1723 1724 if (p->pcf_wait) {
1724 1725 mutex_enter(&new_freemem_lock);
1725 1726 if (freemem_wait) {
1726 1727 if (p->pcf_reserve > 1) {
1727 1728 cv_broadcast(&freemem_cv);
1728 1729 p->pcf_wait = 0;
1729 1730 } else {
1730 1731 cv_signal(&freemem_cv);
1731 1732 p->pcf_wait--;
1732 1733 }
1733 1734 } else {
1734 1735 p->pcf_wait = 0;
1735 1736 }
1736 1737 mutex_exit(&new_freemem_lock);
1737 1738 }
1738 1739 p->pcf_reserve = 0;
1739 1740 mutex_exit(&p->pcf_lock);
1740 1741 p++;
1741 1742 }
1742 1743 }
1743 1744
1744 1745 /*
1745 1746 * Called from page_create_va() when both the cache and free lists
1746 1747 * have been checked once.
1747 1748 *
1748 1749 * Either returns a page or panics since the accounting was done
1749 1750 * way before we got here.
1750 1751 *
1751 1752 * We don't come here often, so leave the accounting on permanently.
1752 1753 */
1753 1754
1754 1755 #define MAX_PCGS 100
1755 1756
1756 1757 #ifdef DEBUG
1757 1758 #define PCGS_TRIES 100
1758 1759 #else /* DEBUG */
1759 1760 #define PCGS_TRIES 10
1760 1761 #endif /* DEBUG */
1761 1762
1762 1763 #ifdef VM_STATS
1763 1764 uint_t pcgs_counts[PCGS_TRIES];
1764 1765 uint_t pcgs_too_many;
1765 1766 uint_t pcgs_entered;
1766 1767 uint_t pcgs_entered_noreloc;
1767 1768 uint_t pcgs_locked;
1768 1769 uint_t pcgs_cagelocked;
1769 1770 #endif /* VM_STATS */
1770 1771
1771 1772 static page_t *
1772 1773 page_create_get_something(vnode_t *vp, u_offset_t off, struct seg *seg,
1773 1774 caddr_t vaddr, uint_t flags)
1774 1775 {
1775 1776 uint_t count;
1776 1777 page_t *pp;
1777 1778 uint_t locked, i;
1778 1779 struct pcf *p;
1779 1780 lgrp_t *lgrp;
1780 1781 int cagelocked = 0;
1781 1782
1782 1783 VM_STAT_ADD(pcgs_entered);
1783 1784
1784 1785 /*
1785 1786 * Tap any reserve freelists: if we fail now, we'll die
1786 1787 * since the page(s) we're looking for have already been
1787 1788 * accounted for.
1788 1789 */
1789 1790 flags |= PG_PANIC;
1790 1791
1791 1792 if ((flags & PG_NORELOC) != 0) {
1792 1793 VM_STAT_ADD(pcgs_entered_noreloc);
1793 1794 /*
1794 1795 * Requests for free pages from critical threads
1795 1796 * such as pageout still won't throttle here, but
1796 1797 * we must try again, to give the cageout thread
1797 1798 * another chance to catch up. Since we already
1798 1799 * accounted for the pages, we had better get them
1799 1800 * this time.
1800 1801 *
1801 1802 * N.B. All non-critical threads acquire the pcgs_cagelock
1802 1803 * to serialize access to the freelists. This implements a
1803 1804  * turnstile-type synchronization to avoid starvation of
1804 1805 * critical requests for PG_NORELOC memory by non-critical
1805 1806 * threads: all non-critical threads must acquire a 'ticket'
1806 1807 * before passing through, which entails making sure
1807 1808 * kcage_freemem won't fall below minfree prior to grabbing
1808 1809 * pages from the freelists.
1809 1810 */
1810 1811 if (kcage_create_throttle(1, flags) == KCT_NONCRIT) {
1811 1812 mutex_enter(&pcgs_cagelock);
1812 1813 cagelocked = 1;
1813 1814 VM_STAT_ADD(pcgs_cagelocked);
1814 1815 }
1815 1816 }
1816 1817
1817 1818 /*
1818 1819 * Time to get serious.
1819 1820 * We failed to get a `correctly colored' page from both the
1820 1821 * free and cache lists.
1821 1822  * We escalate in stages.
1822 1823 *
1823 1824  * First try both lists without worrying about color.
1824 1825 *
1825 1826 * Then, grab all page accounting locks (ie. pcf[]) and
1826 1827 * steal any pages that they have and set the pcf_block flag to
1827 1828 * stop deletions from the lists. This will help because
1828 1829 * a page can get added to the free list while we are looking
1829 1830 * at the cache list, then another page could be added to the cache
1830 1831 * list allowing the page on the free list to be removed as we
1831 1832 * move from looking at the cache list to the free list. This
1832 1833 * could happen over and over. We would never find the page
1833 1834 * we have accounted for.
1834 1835 *
1835 1836 * Noreloc pages are a subset of the global (relocatable) page pool.
1836 1837 * They are not tracked separately in the pcf bins, so it is
1837 1838 * impossible to know when doing pcf accounting if the available
1838 1839 * page(s) are noreloc pages or not. When looking for a noreloc page
1839 1840 * it is quite easy to end up here even if the global (relocatable)
1840 1841 * page pool has plenty of free pages but the noreloc pool is empty.
1841 1842 *
1842 1843 * When the noreloc pool is empty (or low), additional noreloc pages
1843 1844 * are created by converting pages from the global page pool. This
1844 1845 * process will stall during pcf accounting if the pcf bins are
1845 1846 * already locked. Such is the case when a noreloc allocation is
1846 1847 * looping here in page_create_get_something waiting for more noreloc
1847 1848 * pages to appear.
1848 1849 *
1849 1850 * Short of adding a new field to the pcf bins to accurately track
1850 1851 * the number of free noreloc pages, we instead do not grab the
1851 1852 * pcgs_lock, do not set the pcf blocks and do not timeout when
1852 1853 * allocating a noreloc page. This allows noreloc allocations to
1853 1854 * loop without blocking global page pool allocations.
1854 1855 *
1855 1856 * NOTE: the behaviour of page_create_get_something has not changed
1856 1857 * for the case of global page pool allocations.
1857 1858 */
1858 1859
1859 1860 flags &= ~PG_MATCH_COLOR;
1860 1861 locked = 0;
1861 1862 #if defined(__i386) || defined(__amd64)
1862 1863 flags = page_create_update_flags_x86(flags);
1863 1864 #endif
1864 1865
1865 1866 lgrp = lgrp_mem_choose(seg, vaddr, PAGESIZE);
1866 1867
1867 1868 for (count = 0; kcage_on || count < MAX_PCGS; count++) {
1868 1869 pp = page_get_freelist(vp, off, seg, vaddr, PAGESIZE,
1869 1870 flags, lgrp);
1870 1871 if (pp == NULL) {
1871 1872 pp = page_get_cachelist(vp, off, seg, vaddr,
1872 1873 flags, lgrp);
1873 1874 }
1874 1875 if (pp == NULL) {
1875 1876 /*
1876 1877 * Serialize. Don't fight with other pcgs().
1877 1878 */
1878 1879 if (!locked && (!kcage_on || !(flags & PG_NORELOC))) {
1879 1880 mutex_enter(&pcgs_lock);
1880 1881 VM_STAT_ADD(pcgs_locked);
1881 1882 locked = 1;
1882 1883 p = pcf;
1883 1884 for (i = 0; i < pcf_fanout; i++) {
1884 1885 mutex_enter(&p->pcf_lock);
1885 1886 ASSERT(p->pcf_block == 0);
1886 1887 p->pcf_block = 1;
1887 1888 p->pcf_reserve = p->pcf_count;
1888 1889 p->pcf_count = 0;
1889 1890 mutex_exit(&p->pcf_lock);
1890 1891 p++;
1891 1892 }
1892 1893 freemem = 0;
1893 1894 }
1894 1895
1895 1896 if (count) {
1896 1897 /*
1897 1898 * Since page_free() puts pages on
1898 1899 * a list then accounts for it, we
1899 1900 * just have to wait for page_free()
1900 1901 * to unlock any page it was working
1901 1902 * with. The page_lock()-page_reclaim()
1902 1903 * path falls in the same boat.
1903 1904 *
1904 1905 * We don't need to check on the
1905 1906 * PG_WAIT flag, we have already
1906 1907 * accounted for the page we are
1907 1908 * looking for in page_create_va().
1908 1909 *
1909 1910 * We just wait a moment to let any
1910 1911 * locked pages on the lists free up,
1911 1912 * then continue around and try again.
1912 1913 *
1913 1914 * Will be awakened by set_freemem().
1914 1915 */
1915 1916 mutex_enter(&pcgs_wait_lock);
1916 1917 cv_wait(&pcgs_cv, &pcgs_wait_lock);
1917 1918 mutex_exit(&pcgs_wait_lock);
1918 1919 }
1919 1920 } else {
1920 1921 #ifdef VM_STATS
1921 1922 if (count >= PCGS_TRIES) {
1922 1923 VM_STAT_ADD(pcgs_too_many);
1923 1924 } else {
1924 1925 VM_STAT_ADD(pcgs_counts[count]);
1925 1926 }
1926 1927 #endif
1927 1928 if (locked) {
1928 1929 pcgs_unblock();
1929 1930 mutex_exit(&pcgs_lock);
1930 1931 }
1931 1932 if (cagelocked)
1932 1933 mutex_exit(&pcgs_cagelock);
1933 1934 return (pp);
1934 1935 }
1935 1936 }
1936 1937 /*
1937 1938 * we go down holding the pcf locks.
1938 1939 */
1939 1940 panic("no %spage found %d",
1940 1941 ((flags & PG_NORELOC) ? "non-reloc " : ""), count);
1941 1942 /*NOTREACHED*/
1942 1943 }
1943 1944
1944 1945 /*
1945 1946 * Create enough pages for "bytes" worth of data starting at
1946 1947 * "off" in "vp".
1947 1948 *
1948 1949 * Where flag must be one of:
1949 1950 *
1950 1951 * PG_EXCL: Exclusive create (fail if any page already
1951 1952 * exists in the page cache) which does not
1952 1953 * wait for memory to become available.
1953 1954 *
1954 1955 * PG_WAIT: Non-exclusive create which can wait for
1955 1956 * memory to become available.
1956 1957 *
1957 1958 * PG_PHYSCONTIG: Allocate physically contiguous pages.
1958 1959 * (Not Supported)
1959 1960 *
1960 1961 * A doubly linked list of pages is returned to the caller. Each page
1961 1962 * on the list has the "exclusive" (p_selock) lock and "iolock" (p_iolock)
1962 1963 * lock.
1963 1964 *
1964 1965 * Unable to change the parameters to page_create() in a minor release,
1965 1966 * we renamed page_create() to page_create_va(), changed all known calls
1966 1967 * from page_create() to page_create_va(), and created this wrapper.
1967 1968 *
1968 1969 * Upon a major release, we should break compatibility by deleting this
1969 1970 * wrapper, and replacing all the strings "page_create_va", with "page_create".
1970 1971 *
1971 1972 * NOTE: There is a copy of this interface as page_create_io() in
1972 1973 * i86/vm/vm_machdep.c. Any bugs fixed here should be applied
1973 1974 * there.
1974 1975 */
1975 1976 page_t *
1976 1977 page_create(vnode_t *vp, u_offset_t off, size_t bytes, uint_t flags)
1977 1978 {
1978 1979 caddr_t random_vaddr;
1979 1980 struct seg kseg;
1980 1981
1981 1982 #ifdef DEBUG
1982 1983 cmn_err(CE_WARN, "Using deprecated interface page_create: caller %p",
1983 1984 (void *)caller());
1984 1985 #endif
1985 1986
1986 1987 random_vaddr = (caddr_t)(((uintptr_t)vp >> 7) ^
1987 1988 (uintptr_t)(off >> PAGESHIFT));
1988 1989 kseg.s_as = &kas;
1989 1990
1990 1991 return (page_create_va(vp, off, bytes, flags, &kseg, random_vaddr));
1991 1992 }
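
As a concrete illustration of the flag semantics described above, here is a
minimal, hypothetical caller sketch (the helper name is invented and is not
part of vm_page.c): it allocates the pages backing a vnode range with PG_WAIT
and then drops the i/o and exclusive locks that page_create_va() hands back.

	static void
	example_create_and_release(vnode_t *vp, u_offset_t off, size_t len,
	    struct seg *seg, caddr_t vaddr)
	{
		/* PG_WAIT: the request may block until memory is available */
		page_t *plist = page_create_va(vp, off, len, PG_WAIT,
		    seg, vaddr);

		while (plist != NULL) {
			page_t *pp = plist;

			page_sub(&plist, pp);
			/* ... fill in or zero the page contents here ... */
			page_io_unlock(pp);
			page_unlock(pp);
		}
	}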
1992 1993
1993 1994 #ifdef DEBUG
1994 1995 uint32_t pg_alloc_pgs_mtbf = 0;
1995 1996 #endif
1996 1997
1997 1998 /*
1998 1999 * Used for large page support. It will attempt to allocate
1999 2000 * a large page(s) off the freelist.
2000 2001 *
2001 2002 * Returns non zero on failure.
2002 2003 */
2003 2004 int
2004 2005 page_alloc_pages(struct vnode *vp, struct seg *seg, caddr_t addr,
2005 2006 page_t **basepp, page_t *ppa[], uint_t szc, int anypgsz, int pgflags)
2006 2007 {
2007 2008 pgcnt_t npgs, curnpgs, totpgs;
2008 2009 size_t pgsz;
2009 2010 page_t *pplist = NULL, *pp;
2010 2011 int err = 0;
2011 2012 lgrp_t *lgrp;
2012 2013
2013 2014 ASSERT(szc != 0 && szc <= (page_num_pagesizes() - 1));
2014 2015 ASSERT(pgflags == 0 || pgflags == PG_LOCAL);
2015 2016
2016 2017 /*
2017 2018 * Check if system heavily prefers local large pages over remote
2018 2019 * on systems with multiple lgroups.
2019 2020 */
2020 2021 if (lpg_alloc_prefer == LPAP_LOCAL && nlgrps > 1) {
2021 2022 pgflags = PG_LOCAL;
2022 2023 }
2023 2024
2024 2025 VM_STAT_ADD(alloc_pages[0]);
2025 2026
2026 2027 #ifdef DEBUG
2027 2028 if (pg_alloc_pgs_mtbf && !(gethrtime() % pg_alloc_pgs_mtbf)) {
2028 2029 return (ENOMEM);
2029 2030 }
2030 2031 #endif
2031 2032
2032 2033 /*
2033 2034  * Exactly one of basepp and ppa must be NULL,
2034 2035  * and the other must be non-NULL.
2035 2036 */
2036 2037 ASSERT(basepp != NULL || ppa != NULL);
2037 2038 ASSERT(basepp == NULL || ppa == NULL);
2038 2039
2039 2040 #if defined(__i386) || defined(__amd64)
2040 2041 while (page_chk_freelist(szc) == 0) {
2041 2042 VM_STAT_ADD(alloc_pages[8]);
2042 2043 if (anypgsz == 0 || --szc == 0)
2043 2044 return (ENOMEM);
2044 2045 }
2045 2046 #endif
2046 2047
2047 2048 pgsz = page_get_pagesize(szc);
2048 2049 totpgs = curnpgs = npgs = pgsz >> PAGESHIFT;
2049 2050
2050 2051 ASSERT(((uintptr_t)addr & (pgsz - 1)) == 0);
2051 2052
2052 2053 (void) page_create_wait(npgs, PG_WAIT);
2053 2054
2054 2055 while (npgs && szc) {
2055 2056 lgrp = lgrp_mem_choose(seg, addr, pgsz);
2056 2057 if (pgflags == PG_LOCAL) {
2057 2058 pp = page_get_freelist(vp, 0, seg, addr, pgsz,
2058 2059 pgflags, lgrp);
2059 2060 if (pp == NULL) {
2060 2061 pp = page_get_freelist(vp, 0, seg, addr, pgsz,
2061 2062 0, lgrp);
2062 2063 }
2063 2064 } else {
2064 2065 pp = page_get_freelist(vp, 0, seg, addr, pgsz,
2065 2066 0, lgrp);
2066 2067 }
2067 2068 if (pp != NULL) {
2068 2069 VM_STAT_ADD(alloc_pages[1]);
2069 2070 page_list_concat(&pplist, &pp);
2070 2071 ASSERT(npgs >= curnpgs);
2071 2072 npgs -= curnpgs;
2072 2073 } else if (anypgsz) {
2073 2074 VM_STAT_ADD(alloc_pages[2]);
2074 2075 szc--;
2075 2076 pgsz = page_get_pagesize(szc);
2076 2077 curnpgs = pgsz >> PAGESHIFT;
2077 2078 } else {
2078 2079 VM_STAT_ADD(alloc_pages[3]);
2079 2080 ASSERT(npgs == totpgs);
2080 2081 page_create_putback(npgs);
2081 2082 return (ENOMEM);
2082 2083 }
2083 2084 }
2084 2085 if (szc == 0) {
2085 2086 VM_STAT_ADD(alloc_pages[4]);
2086 2087 ASSERT(npgs != 0);
2087 2088 page_create_putback(npgs);
2088 2089 err = ENOMEM;
2089 2090 } else if (basepp != NULL) {
2090 2091 ASSERT(npgs == 0);
2091 2092 ASSERT(ppa == NULL);
2092 2093 *basepp = pplist;
2093 2094 }
2094 2095
2095 2096 npgs = totpgs - npgs;
2096 2097 pp = pplist;
2097 2098
2098 2099 /*
2099 2100  * Clear the free and age bits. Also, if we were passed a ppa, then
2100 2101 * fill it in with all the constituent pages from the large page. But
2101 2102 * if we failed to allocate all the pages just free what we got.
2102 2103 */
2103 2104 while (npgs != 0) {
2104 2105 ASSERT(PP_ISFREE(pp));
2105 2106 ASSERT(PP_ISAGED(pp));
2106 2107 if (ppa != NULL || err != 0) {
2107 2108 if (err == 0) {
2108 2109 VM_STAT_ADD(alloc_pages[5]);
2109 2110 PP_CLRFREE(pp);
2110 2111 PP_CLRAGED(pp);
2111 2112 page_sub(&pplist, pp);
2112 2113 *ppa++ = pp;
2113 2114 npgs--;
2114 2115 } else {
2115 2116 VM_STAT_ADD(alloc_pages[6]);
2116 2117 ASSERT(pp->p_szc != 0);
2117 2118 curnpgs = page_get_pagecnt(pp->p_szc);
2118 2119 page_list_break(&pp, &pplist, curnpgs);
2119 2120 page_list_add_pages(pp, 0);
2120 2121 page_create_putback(curnpgs);
2121 2122 ASSERT(npgs >= curnpgs);
2122 2123 npgs -= curnpgs;
2123 2124 }
2124 2125 pp = pplist;
2125 2126 } else {
2126 2127 VM_STAT_ADD(alloc_pages[7]);
2127 2128 PP_CLRFREE(pp);
2128 2129 PP_CLRAGED(pp);
2129 2130 pp = pp->p_next;
2130 2131 npgs--;
2131 2132 }
2132 2133 }
2133 2134 return (err);
2134 2135 }
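
A minimal hypothetical caller sketch (the helper name is invented, not part of
this file) of the contract spelled out above: exactly one of basepp/ppa is
used, anypgsz permits fallback to smaller size codes, and a non-zero return
means the allocation failed.

	static int
	example_alloc_large(struct vnode *vp, struct seg *seg, caddr_t addr,
	    uint_t szc, page_t *ppa[])
	{
		/*
		 * basepp is NULL, so the constituent pages come back in
		 * ppa[]; anypgsz != 0 lets page_alloc_pages() retry at
		 * smaller size codes before failing with ENOMEM.
		 */
		return (page_alloc_pages(vp, seg, addr, NULL, ppa, szc, 1, 0));
	}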
2135 2136
2136 2137 /*
2137 2138 * Get a single large page off of the freelists, and set it up for use.
2138 2139 * Number of bytes requested must be a supported page size.
2139 2140 *
2140 2141 * Note that this call may fail even if there is sufficient
2141 2142 * memory available or PG_WAIT is set, so the caller must
2142 2143  * be willing to fall back on page_create_va(), block and retry,
2143 2144 * or fail the requester.
2144 2145 */
2145 2146 page_t *
2146 2147 page_create_va_large(vnode_t *vp, u_offset_t off, size_t bytes, uint_t flags,
2147 2148 struct seg *seg, caddr_t vaddr, void *arg)
2148 2149 {
2149 2150 pgcnt_t npages;
2150 2151 page_t *pp;
2151 2152 page_t *rootpp;
2152 2153 lgrp_t *lgrp;
2153 2154 lgrp_id_t *lgrpid = (lgrp_id_t *)arg;
2154 2155
2155 2156 ASSERT(vp != NULL);
2156 2157
2157 2158 ASSERT((flags & ~(PG_EXCL | PG_WAIT |
2158 2159 PG_NORELOC | PG_PANIC | PG_PUSHPAGE | PG_NORMALPRI)) == 0);
2159 2160 /* but no others */
2160 2161
2161 2162 ASSERT((flags & PG_EXCL) == PG_EXCL);
2162 2163
2163 2164 npages = btop(bytes);
2164 2165
2165 2166 if (!kcage_on || panicstr) {
2166 2167 /*
2167 2168 * Cage is OFF, or we are single threaded in
2168 2169 * panic, so make everything a RELOC request.
2169 2170 */
2170 2171 flags &= ~PG_NORELOC;
2171 2172 }
2172 2173
2173 2174 /*
2174 2175 * Make sure there's adequate physical memory available.
2175 2176 * Note: PG_WAIT is ignored here.
2176 2177 */
2177 2178 if (freemem <= throttlefree + npages) {
2178 2179 VM_STAT_ADD(page_create_large_cnt[1]);
2179 2180 return (NULL);
2180 2181 }
2181 2182
2182 2183 /*
2183 2184 * If cage is on, dampen draw from cage when available
2184 2185 * cage space is low.
2185 2186 */
2186 2187 if ((flags & (PG_NORELOC | PG_WAIT)) == (PG_NORELOC | PG_WAIT) &&
2187 2188 kcage_freemem < kcage_throttlefree + npages) {
2188 2189
2189 2190 /*
2190 2191 * The cage is on, the caller wants PG_NORELOC
2191 2192 * pages and available cage memory is very low.
2192 2193 * Call kcage_create_throttle() to attempt to
2193 2194 * control demand on the cage.
2194 2195 */
2195 2196 if (kcage_create_throttle(npages, flags) == KCT_FAILURE) {
2196 2197 VM_STAT_ADD(page_create_large_cnt[2]);
2197 2198 return (NULL);
2198 2199 }
2199 2200 }
2200 2201
2201 2202 if (!pcf_decrement_bucket(npages) &&
2202 2203 !pcf_decrement_multiple(NULL, npages, 1)) {
2203 2204 VM_STAT_ADD(page_create_large_cnt[4]);
2204 2205 return (NULL);
2205 2206 }
2206 2207
2207 2208 /*
2208 2209 * This is where this function behaves fundamentally differently
2209 2210 * than page_create_va(); since we're intending to map the page
2210 2211 * with a single TTE, we have to get it as a physically contiguous
2211 2212 * hardware pagesize chunk. If we can't, we fail.
2212 2213 */
2213 2214 if (lgrpid != NULL && *lgrpid >= 0 && *lgrpid <= lgrp_alloc_max &&
2214 2215 LGRP_EXISTS(lgrp_table[*lgrpid]))
2215 2216 lgrp = lgrp_table[*lgrpid];
2216 2217 else
2217 2218 lgrp = lgrp_mem_choose(seg, vaddr, bytes);
2218 2219
2219 2220 if ((rootpp = page_get_freelist(&kvp, off, seg, vaddr,
2220 2221 bytes, flags & ~PG_MATCH_COLOR, lgrp)) == NULL) {
2221 2222 page_create_putback(npages);
2222 2223 VM_STAT_ADD(page_create_large_cnt[5]);
2223 2224 return (NULL);
2224 2225 }
2225 2226
2226 2227 /*
2227 2228  * If we got the page with the wrong mtype, give it back; this is a
2228 2229  * workaround for CR 6249718. Once CR 6249718 is fixed we will never
2229 2230  * get inside this "if" and the workaround becomes just a no-op.
2230 2231 */
2231 2232 if (kcage_on && (flags & PG_NORELOC) && !PP_ISNORELOC(rootpp)) {
2232 2233 page_list_add_pages(rootpp, 0);
2233 2234 page_create_putback(npages);
2234 2235 VM_STAT_ADD(page_create_large_cnt[6]);
2235 2236 return (NULL);
2236 2237 }
2237 2238
2238 2239 /*
2239 2240 * If satisfying this request has left us with too little
2240 2241 * memory, start the wheels turning to get some back. The
2241 2242 * first clause of the test prevents waking up the pageout
2242 2243 * daemon in situations where it would decide that there's
2243 2244 * nothing to do.
2244 2245 */
2245 2246 if (nscan < desscan && freemem < minfree) {
2246 2247 TRACE_1(TR_FAC_VM, TR_PAGEOUT_CV_SIGNAL,
2247 2248 "pageout_cv_signal:freemem %ld", freemem);
2248 2249 WAKE_PAGEOUT_SCANNER();
2249 2250 }
2250 2251
2251 2252 pp = rootpp;
2252 2253 while (npages--) {
2253 2254 ASSERT(PAGE_EXCL(pp));
2254 2255 ASSERT(pp->p_vnode == NULL);
2255 2256 ASSERT(!hat_page_is_mapped(pp));
2256 2257 PP_CLRFREE(pp);
2257 2258 PP_CLRAGED(pp);
2258 2259 if (!page_hashin(pp, vp, off, NULL))
2259 2260 panic("page_create_large: hashin failed: page %p",
2260 2261 (void *)pp);
2261 2262 page_io_lock(pp);
2262 2263 off += PAGESIZE;
2263 2264 pp = pp->p_next;
2264 2265 }
2265 2266
2266 2267 VM_STAT_ADD(page_create_large_cnt[0]);
2267 2268 return (rootpp);
2268 2269 }
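
Because page_create_va_large() may fail even when memory is plentiful, the
comment above tells callers to fall back, retry, or give up. A hypothetical
sketch of the fallback pattern (the helper name is invented, not part of this
file):

	static page_t *
	example_create_large_or_small(vnode_t *vp, u_offset_t off, size_t bytes,
	    struct seg *seg, caddr_t vaddr)
	{
		/* PG_EXCL is mandatory for page_create_va_large() */
		page_t *pp = page_create_va_large(vp, off, bytes,
		    PG_EXCL | PG_WAIT, seg, vaddr, NULL);

		if (pp == NULL) {
			/* no contiguous large page; take PAGESIZE pages */
			pp = page_create_va(vp, off, bytes, PG_EXCL | PG_WAIT,
			    seg, vaddr);
		}
		return (pp);
	}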
2269 2270
2270 2271 page_t *
2271 2272 page_create_va(vnode_t *vp, u_offset_t off, size_t bytes, uint_t flags,
2272 2273 struct seg *seg, caddr_t vaddr)
2273 2274 {
2274 2275 page_t *plist = NULL;
2275 2276 pgcnt_t npages;
2276 2277 pgcnt_t found_on_free = 0;
2277 2278 pgcnt_t pages_req;
2278 2279 page_t *npp = NULL;
2279 2280 struct pcf *p;
2280 2281 lgrp_t *lgrp;
2281 2282
2282 2283 TRACE_4(TR_FAC_VM, TR_PAGE_CREATE_START,
2283 2284 "page_create_start:vp %p off %llx bytes %lu flags %x",
2284 2285 vp, off, bytes, flags);
2285 2286
2286 2287 ASSERT(bytes != 0 && vp != NULL);
2287 2288
2288 2289 if ((flags & PG_EXCL) == 0 && (flags & PG_WAIT) == 0) {
2289 2290 panic("page_create: invalid flags");
2290 2291 /*NOTREACHED*/
2291 2292 }
2292 2293 ASSERT((flags & ~(PG_EXCL | PG_WAIT |
2293 2294 PG_NORELOC | PG_PANIC | PG_PUSHPAGE | PG_NORMALPRI)) == 0);
2294 2295 /* but no others */
2295 2296
2296 2297 pages_req = npages = btopr(bytes);
2297 2298 /*
2298 2299 * Try to see whether request is too large to *ever* be
2299 2300 * satisfied, in order to prevent deadlock. We arbitrarily
2300 2301 * decide to limit maximum size requests to max_page_get.
2301 2302 */
2302 2303 if (npages >= max_page_get) {
2303 2304 if ((flags & PG_WAIT) == 0) {
2304 2305 TRACE_4(TR_FAC_VM, TR_PAGE_CREATE_TOOBIG,
2305 2306 "page_create_toobig:vp %p off %llx npages "
2306 2307 "%lu max_page_get %lu",
2307 2308 vp, off, npages, max_page_get);
2308 2309 return (NULL);
2309 2310 } else {
2310 2311 cmn_err(CE_WARN,
2311 2312 "Request for too much kernel memory "
2312 2313 "(%lu bytes), will hang forever", bytes);
2313 2314 for (;;)
2314 2315 delay(1000000000);
2315 2316 }
2316 2317 }
2317 2318
2318 2319 if (!kcage_on || panicstr) {
2319 2320 /*
2320 2321 * Cage is OFF, or we are single threaded in
2321 2322 * panic, so make everything a RELOC request.
2322 2323 */
2323 2324 flags &= ~PG_NORELOC;
2324 2325 }
2325 2326
2326 2327 if (freemem <= throttlefree + npages)
2327 2328 if (!page_create_throttle(npages, flags))
2328 2329 return (NULL);
2329 2330
2330 2331 /*
2331 2332 * If cage is on, dampen draw from cage when available
2332 2333 * cage space is low.
2333 2334 */
2334 2335 if ((flags & PG_NORELOC) &&
2335 2336 kcage_freemem < kcage_throttlefree + npages) {
2336 2337
2337 2338 /*
2338 2339 * The cage is on, the caller wants PG_NORELOC
2339 2340 * pages and available cage memory is very low.
2340 2341 * Call kcage_create_throttle() to attempt to
2341 2342 * control demand on the cage.
2342 2343 */
2343 2344 if (kcage_create_throttle(npages, flags) == KCT_FAILURE)
2344 2345 return (NULL);
2345 2346 }
2346 2347
2347 2348 VM_STAT_ADD(page_create_cnt[0]);
2348 2349
2349 2350 if (!pcf_decrement_bucket(npages)) {
2350 2351 /*
2351 2352 * Have to look harder. If npages is greater than
2352 2353 * one, then we might have to coalesce the counters.
2353 2354 *
2354 2355 * Go wait. We come back having accounted
2355 2356 * for the memory.
2356 2357 */
2357 2358 VM_STAT_ADD(page_create_cnt[1]);
2358 2359 if (!page_create_wait(npages, flags)) {
2359 2360 VM_STAT_ADD(page_create_cnt[2]);
2360 2361 return (NULL);
2361 2362 }
2362 2363 }
2363 2364
2364 2365 TRACE_2(TR_FAC_VM, TR_PAGE_CREATE_SUCCESS,
2365 2366 "page_create_success:vp %p off %llx", vp, off);
2366 2367
2367 2368 /*
2368 2369 * If satisfying this request has left us with too little
2369 2370 * memory, start the wheels turning to get some back. The
2370 2371 * first clause of the test prevents waking up the pageout
2371 2372 * daemon in situations where it would decide that there's
2372 2373 * nothing to do.
2373 2374 */
2374 2375 if (nscan < desscan && freemem < minfree) {
2375 2376 TRACE_1(TR_FAC_VM, TR_PAGEOUT_CV_SIGNAL,
2376 2377 "pageout_cv_signal:freemem %ld", freemem);
2377 2378 WAKE_PAGEOUT_SCANNER();
2378 2379 }
2379 2380
2380 2381 /*
2381 2382 * Loop around collecting the requested number of pages.
2382 2383 * Most of the time, we have to `create' a new page. With
2383 2384 * this in mind, pull the page off the free list before
2384 2385 * getting the hash lock. This will minimize the hash
2385 2386 * lock hold time, nesting, and the like. If it turns
2386 2387 * out we don't need the page, we put it back at the end.
2387 2388 */
2388 2389 while (npages--) {
2389 2390 page_t *pp;
2390 2391 kmutex_t *phm = NULL;
2391 2392 ulong_t index;
2392 2393
2393 2394 index = PAGE_HASH_FUNC(vp, off);
2394 2395 top:
2395 2396 ASSERT(phm == NULL);
2396 2397 ASSERT(index == PAGE_HASH_FUNC(vp, off));
2397 2398 ASSERT(MUTEX_NOT_HELD(page_vnode_mutex(vp)));
2398 2399
2399 2400 if (npp == NULL) {
2400 2401 /*
2401 2402 * Try to get a page from the freelist (ie,
2402 2403 * a page with no [vp, off] tag). If that
2403 2404 * fails, use the cachelist.
2404 2405 *
2405 2406 * During the first attempt at both the free
2406 2407 * and cache lists we try for the correct color.
2407 2408 */
2408 2409 /*
2409 2410  * XXXX - how do we deal with virtually indexed
2410 2411  * caches and colors?
2411 2412 */
2412 2413 VM_STAT_ADD(page_create_cnt[4]);
2413 2414 /*
2414 2415 * Get lgroup to allocate next page of shared memory
2415 2416 * from and use it to specify where to allocate
2416 2417 * the physical memory
2417 2418 */
2418 2419 lgrp = lgrp_mem_choose(seg, vaddr, PAGESIZE);
2419 2420 npp = page_get_freelist(vp, off, seg, vaddr, PAGESIZE,
2420 2421 flags | PG_MATCH_COLOR, lgrp);
2421 2422 if (npp == NULL) {
2422 2423 npp = page_get_cachelist(vp, off, seg,
2423 2424 vaddr, flags | PG_MATCH_COLOR, lgrp);
2424 2425 if (npp == NULL) {
2425 2426 npp = page_create_get_something(vp,
2426 2427 off, seg, vaddr,
2427 2428 flags & ~PG_MATCH_COLOR);
2428 2429 }
2429 2430
2430 2431 if (PP_ISAGED(npp) == 0) {
2431 2432 /*
2432 2433 * Since this page came from the
2433 2434 * cachelist, we must destroy the
2434 2435 * old vnode association.
2435 2436 */
2436 2437 page_hashout(npp, NULL);
2437 2438 }
2438 2439 }
2439 2440 }
2440 2441
2441 2442 /*
2442 2443 * We own this page!
2443 2444 */
2444 2445 ASSERT(PAGE_EXCL(npp));
2445 2446 ASSERT(npp->p_vnode == NULL);
2446 2447 ASSERT(!hat_page_is_mapped(npp));
2447 2448 PP_CLRFREE(npp);
2448 2449 PP_CLRAGED(npp);
2449 2450
2450 2451 /*
2451 2452  * Here we have a page in our hot little mitts and are
2452 2453 * just waiting to stuff it on the appropriate lists.
2453 2454 * Get the mutex and check to see if it really does
2454 2455 * not exist.
2455 2456 */
2456 2457 phm = PAGE_HASH_MUTEX(index);
2457 2458 mutex_enter(phm);
2458 2459 pp = page_hash_search(index, vp, off);
2459 2460 if (pp == NULL) {
2460 2461 VM_STAT_ADD(page_create_new);
2461 2462 pp = npp;
2462 2463 npp = NULL;
2463 2464 if (!page_hashin(pp, vp, off, phm)) {
2464 2465 /*
2465 2466 * Since we hold the page hash mutex and
2466 2467 * just searched for this page, page_hashin
2467 2468 * had better not fail. If it does, that
2468 2469  * means some thread did not follow the
2469 2470 * page hash mutex rules. Panic now and
2470 2471 * get it over with. As usual, go down
2471 2472 * holding all the locks.
2472 2473 */
2473 2474 ASSERT(MUTEX_HELD(phm));
2474 2475 panic("page_create: "
2475 2476 "hashin failed %p %p %llx %p",
2476 2477 (void *)pp, (void *)vp, off, (void *)phm);
2477 2478 /*NOTREACHED*/
2478 2479 }
2479 2480 ASSERT(MUTEX_HELD(phm));
2480 2481 mutex_exit(phm);
2481 2482 phm = NULL;
2482 2483
2483 2484 /*
2484 2485 * Hat layer locking need not be done to set
2485 2486 * the following bits since the page is not hashed
2486 2487 * and was on the free list (i.e., had no mappings).
2487 2488 *
2488 2489 * Set the reference bit to protect
2489 2490 * against immediate pageout
2490 2491 *
2491 2492 * XXXmh modify freelist code to set reference
2492 2493 * bit so we don't have to do it here.
2493 2494 */
2494 2495 page_set_props(pp, P_REF);
2495 2496 found_on_free++;
2496 2497 } else {
2497 2498 VM_STAT_ADD(page_create_exists);
2498 2499 if (flags & PG_EXCL) {
2499 2500 /*
2500 2501 * Found an existing page, and the caller
2501 2502 * wanted all new pages. Undo all of the work
2502 2503 * we have done.
2503 2504 */
2504 2505 mutex_exit(phm);
2505 2506 phm = NULL;
2506 2507 while (plist != NULL) {
2507 2508 pp = plist;
2508 2509 page_sub(&plist, pp);
2509 2510 page_io_unlock(pp);
2510 2511 /* large pages should not end up here */
2511 2512 ASSERT(pp->p_szc == 0);
2512 2513 /*LINTED: constant in conditional ctx*/
2513 2514 VN_DISPOSE(pp, B_INVAL, 0, kcred);
2514 2515 }
2515 2516 VM_STAT_ADD(page_create_found_one);
2516 2517 goto fail;
2517 2518 }
2518 2519 ASSERT(flags & PG_WAIT);
2519 2520 if (!page_lock(pp, SE_EXCL, phm, P_NO_RECLAIM)) {
2520 2521 /*
2521 2522 * Start all over again if we blocked trying
2522 2523 * to lock the page.
2523 2524 */
2524 2525 mutex_exit(phm);
2525 2526 VM_STAT_ADD(page_create_page_lock_failed);
2526 2527 phm = NULL;
2527 2528 goto top;
2528 2529 }
2529 2530 mutex_exit(phm);
2530 2531 phm = NULL;
2531 2532
2532 2533 if (PP_ISFREE(pp)) {
2533 2534 ASSERT(PP_ISAGED(pp) == 0);
2534 2535 VM_STAT_ADD(pagecnt.pc_get_cache);
2535 2536 page_list_sub(pp, PG_CACHE_LIST);
2536 2537 PP_CLRFREE(pp);
2537 2538 found_on_free++;
2538 2539 }
2539 2540 }
2540 2541
2541 2542 /*
2542 2543 * Got a page! It is locked. Acquire the i/o
2543 2544 * lock since we are going to use the p_next and
2544 2545 * p_prev fields to link the requested pages together.
2545 2546 */
2546 2547 page_io_lock(pp);
2547 2548 page_add(&plist, pp);
2548 2549 plist = plist->p_next;
2549 2550 off += PAGESIZE;
2550 2551 vaddr += PAGESIZE;
2551 2552 }
2552 2553
2553 2554 ASSERT((flags & PG_EXCL) ? (found_on_free == pages_req) : 1);
2554 2555 fail:
2555 2556 if (npp != NULL) {
2556 2557 /*
2557 2558 * Did not need this page after all.
2558 2559 * Put it back on the free list.
2559 2560 */
2560 2561 VM_STAT_ADD(page_create_putbacks);
2561 2562 PP_SETFREE(npp);
2562 2563 PP_SETAGED(npp);
2563 2564 npp->p_offset = (u_offset_t)-1;
2564 2565 page_list_add(npp, PG_FREE_LIST | PG_LIST_TAIL);
2565 2566 page_unlock(npp);
2566 2567
2567 2568 }
2568 2569
2569 2570 ASSERT(pages_req >= found_on_free);
2570 2571
2571 2572 {
2572 2573 uint_t overshoot = (uint_t)(pages_req - found_on_free);
2573 2574
2574 2575 if (overshoot) {
2575 2576 VM_STAT_ADD(page_create_overshoot);
2576 2577 p = &pcf[PCF_INDEX()];
2577 2578 mutex_enter(&p->pcf_lock);
2578 2579 if (p->pcf_block) {
2579 2580 p->pcf_reserve += overshoot;
2580 2581 } else {
2581 2582 p->pcf_count += overshoot;
2582 2583 if (p->pcf_wait) {
2583 2584 mutex_enter(&new_freemem_lock);
2584 2585 if (freemem_wait) {
2585 2586 cv_signal(&freemem_cv);
2586 2587 p->pcf_wait--;
2587 2588 } else {
2588 2589 p->pcf_wait = 0;
2589 2590 }
2590 2591 mutex_exit(&new_freemem_lock);
2591 2592 }
2592 2593 }
2593 2594 mutex_exit(&p->pcf_lock);
2594 2595 /* freemem is approximate, so this test OK */
2595 2596 if (!p->pcf_block)
2596 2597 freemem += overshoot;
2597 2598 }
2598 2599 }
2599 2600
2600 2601 return (plist);
2601 2602 }
2602 2603
2603 2604 /*
2604 2605  * One or more constituent pages of this large page have been marked
2605 2606 * toxic. Simply demote the large page to PAGESIZE pages and let
2606 2607 * page_free() handle it. This routine should only be called by
2607 2608  * large page free routines (page_free_pages() and page_destroy_pages()).
2608 2609 * All pages are locked SE_EXCL and have already been marked free.
2609 2610 */
2610 2611 static void
2611 2612 page_free_toxic_pages(page_t *rootpp)
2612 2613 {
2613 2614 page_t *tpp;
2614 2615 pgcnt_t i, pgcnt = page_get_pagecnt(rootpp->p_szc);
2615 2616 uint_t szc = rootpp->p_szc;
2616 2617
2617 2618 for (i = 0, tpp = rootpp; i < pgcnt; i++, tpp = tpp->p_next) {
2618 2619 ASSERT(tpp->p_szc == szc);
2619 2620 ASSERT((PAGE_EXCL(tpp) &&
2620 2621 !page_iolock_assert(tpp)) || panicstr);
2621 2622 tpp->p_szc = 0;
2622 2623 }
2623 2624
2624 2625 while (rootpp != NULL) {
2625 2626 tpp = rootpp;
2626 2627 page_sub(&rootpp, tpp);
2627 2628 ASSERT(PP_ISFREE(tpp));
2628 2629 PP_CLRFREE(tpp);
2629 2630 page_free(tpp, 1);
2630 2631 }
2631 2632 }
2632 2633
2633 2634 /*
2634 2635 * Put page on the "free" list.
2635 2636 * The free list is really two lists maintained by
2636 2637 * the PSM of whatever machine we happen to be on.
2637 2638 */
2638 2639 void
2639 2640 page_free(page_t *pp, int dontneed)
2640 2641 {
2641 2642 struct pcf *p;
2642 2643 uint_t pcf_index;
2643 2644
2644 2645 ASSERT((PAGE_EXCL(pp) &&
2645 2646 !page_iolock_assert(pp)) || panicstr);
2646 2647
2647 2648 if (PP_ISFREE(pp)) {
2648 2649 panic("page_free: page %p is free", (void *)pp);
2649 2650 }
2650 2651
2651 2652 if (pp->p_szc != 0) {
2652 2653 if (pp->p_vnode == NULL || IS_SWAPFSVP(pp->p_vnode) ||
2653 2654 PP_ISKAS(pp)) {
2654 2655 panic("page_free: anon or kernel "
2655 2656 "or no vnode large page %p", (void *)pp);
2656 2657 }
2657 2658 page_demote_vp_pages(pp);
2658 2659 ASSERT(pp->p_szc == 0);
2659 2660 }
2660 2661
2661 2662 /*
2662 2663 * The page_struct_lock need not be acquired to examine these
2663 2664 * fields since the page has an "exclusive" lock.
2664 2665 */
2665 2666 if (hat_page_is_mapped(pp) || pp->p_lckcnt != 0 || pp->p_cowcnt != 0 ||
2666 2667 pp->p_slckcnt != 0) {
2667 2668 panic("page_free pp=%p, pfn=%lx, lckcnt=%d, cowcnt=%d "
2668 2669 "slckcnt = %d", (void *)pp, page_pptonum(pp), pp->p_lckcnt,
2669 2670 pp->p_cowcnt, pp->p_slckcnt);
2670 2671 /*NOTREACHED*/
2671 2672 }
2672 2673
2673 2674 ASSERT(!hat_page_getshare(pp));
2674 2675
2675 2676 PP_SETFREE(pp);
2676 2677 ASSERT(pp->p_vnode == NULL || !IS_VMODSORT(pp->p_vnode) ||
2677 2678 !hat_ismod(pp));
2678 2679 page_clr_all_props(pp);
2679 2680 ASSERT(!hat_page_getshare(pp));
2680 2681
2681 2682 /*
2682 2683 * Now we add the page to the head of the free list.
2683 2684 * But if this page is associated with a paged vnode
2684 2685 * then we adjust the head forward so that the page is
2685 2686 * effectively at the end of the list.
2686 2687 */
2687 2688 if (pp->p_vnode == NULL) {
2688 2689 /*
2689 2690 * Page has no identity, put it on the free list.
2690 2691 */
2691 2692 PP_SETAGED(pp);
2692 2693 pp->p_offset = (u_offset_t)-1;
2693 2694 page_list_add(pp, PG_FREE_LIST | PG_LIST_TAIL);
2694 2695 VM_STAT_ADD(pagecnt.pc_free_free);
2695 2696 TRACE_1(TR_FAC_VM, TR_PAGE_FREE_FREE,
2696 2697 "page_free_free:pp %p", pp);
2697 2698 } else {
2698 2699 PP_CLRAGED(pp);
2699 2700
2700 2701 if (!dontneed) {
2701 2702 /* move it to the tail of the list */
2702 2703 page_list_add(pp, PG_CACHE_LIST | PG_LIST_TAIL);
2703 2704
2704 2705 VM_STAT_ADD(pagecnt.pc_free_cache);
2705 2706 TRACE_1(TR_FAC_VM, TR_PAGE_FREE_CACHE_TAIL,
2706 2707 "page_free_cache_tail:pp %p", pp);
2707 2708 } else {
2708 2709 page_list_add(pp, PG_CACHE_LIST | PG_LIST_HEAD);
2709 2710
2710 2711 VM_STAT_ADD(pagecnt.pc_free_dontneed);
2711 2712 TRACE_1(TR_FAC_VM, TR_PAGE_FREE_CACHE_HEAD,
2712 2713 "page_free_cache_head:pp %p", pp);
2713 2714 }
2714 2715 }
2715 2716 page_unlock(pp);
2716 2717
2717 2718 /*
2718 2719 * Now do the `freemem' accounting.
2719 2720 */
2720 2721 pcf_index = PCF_INDEX();
2721 2722 p = &pcf[pcf_index];
2722 2723
2723 2724 mutex_enter(&p->pcf_lock);
2724 2725 if (p->pcf_block) {
2725 2726 p->pcf_reserve += 1;
2726 2727 } else {
2727 2728 p->pcf_count += 1;
2728 2729 if (p->pcf_wait) {
2729 2730 mutex_enter(&new_freemem_lock);
2730 2731 /*
2731 2732 * Check to see if some other thread
2732 2733 * is actually waiting. Another bucket
2733 2734 * may have woken it up by now. If there
2734 2735 * are no waiters, then set our pcf_wait
2735 2736 * count to zero to avoid coming in here
2736 2737 * next time. Also, since only one page
2737 2738 * was put on the free list, just wake
2738 2739 * up one waiter.
2739 2740 */
2740 2741 if (freemem_wait) {
2741 2742 cv_signal(&freemem_cv);
2742 2743 p->pcf_wait--;
2743 2744 } else {
2744 2745 p->pcf_wait = 0;
2745 2746 }
2746 2747 mutex_exit(&new_freemem_lock);
2747 2748 }
2748 2749 }
2749 2750 mutex_exit(&p->pcf_lock);
2750 2751
2751 2752 /* freemem is approximate, so this test OK */
2752 2753 if (!p->pcf_block)
2753 2754 freemem += 1;
2754 2755 }
2755 2756
2756 2757 /*
2757 2758  * Put page on the "free" list during initial startup.
2758 2759 * This happens during initial single threaded execution.
2759 2760 */
2760 2761 void
2761 2762 page_free_at_startup(page_t *pp)
2762 2763 {
2763 2764 struct pcf *p;
2764 2765 uint_t pcf_index;
2765 2766
2766 2767 page_list_add(pp, PG_FREE_LIST | PG_LIST_HEAD | PG_LIST_ISINIT);
2767 2768 VM_STAT_ADD(pagecnt.pc_free_free);
2768 2769
2769 2770 /*
2770 2771 * Now do the `freemem' accounting.
2771 2772 */
2772 2773 pcf_index = PCF_INDEX();
2773 2774 p = &pcf[pcf_index];
2774 2775
2775 2776 ASSERT(p->pcf_block == 0);
2776 2777 ASSERT(p->pcf_wait == 0);
2777 2778 p->pcf_count += 1;
2778 2779
2779 2780 /* freemem is approximate, so this is OK */
2780 2781 freemem += 1;
2781 2782 }
2782 2783
2783 2784 void
2784 2785 page_free_pages(page_t *pp)
2785 2786 {
2786 2787 page_t *tpp, *rootpp = NULL;
2787 2788 pgcnt_t pgcnt = page_get_pagecnt(pp->p_szc);
2788 2789 pgcnt_t i;
2789 2790 uint_t szc = pp->p_szc;
2790 2791
2791 2792 VM_STAT_ADD(pagecnt.pc_free_pages);
2792 2793 TRACE_1(TR_FAC_VM, TR_PAGE_FREE_FREE,
2793 2794 "page_free_free:pp %p", pp);
2794 2795
2795 2796 ASSERT(pp->p_szc != 0 && pp->p_szc < page_num_pagesizes());
2796 2797 if ((page_pptonum(pp) & (pgcnt - 1)) != 0) {
2797 2798 panic("page_free_pages: not root page %p", (void *)pp);
2798 2799 /*NOTREACHED*/
2799 2800 }
2800 2801
2801 2802 for (i = 0, tpp = pp; i < pgcnt; i++, tpp++) {
2802 2803 ASSERT((PAGE_EXCL(tpp) &&
2803 2804 !page_iolock_assert(tpp)) || panicstr);
2804 2805 if (PP_ISFREE(tpp)) {
2805 2806 panic("page_free_pages: page %p is free", (void *)tpp);
2806 2807 /*NOTREACHED*/
2807 2808 }
2808 2809 if (hat_page_is_mapped(tpp) || tpp->p_lckcnt != 0 ||
2809 2810 tpp->p_cowcnt != 0 || tpp->p_slckcnt != 0) {
2810 2811 panic("page_free_pages %p", (void *)tpp);
2811 2812 /*NOTREACHED*/
2812 2813 }
2813 2814
2814 2815 ASSERT(!hat_page_getshare(tpp));
2815 2816 ASSERT(tpp->p_vnode == NULL);
2816 2817 ASSERT(tpp->p_szc == szc);
2817 2818
2818 2819 PP_SETFREE(tpp);
2819 2820 page_clr_all_props(tpp);
2820 2821 PP_SETAGED(tpp);
2821 2822 tpp->p_offset = (u_offset_t)-1;
2822 2823 ASSERT(tpp->p_next == tpp);
2823 2824 ASSERT(tpp->p_prev == tpp);
2824 2825 page_list_concat(&rootpp, &tpp);
2825 2826 }
2826 2827 ASSERT(rootpp == pp);
2827 2828
2828 2829 page_list_add_pages(rootpp, 0);
2829 2830 page_create_putback(pgcnt);
2830 2831 }
2831 2832
2832 2833 int free_pages = 1;
2833 2834
2834 2835 /*
2835 2836 * This routine attempts to return pages to the cachelist via page_release().
2836 2837 * It does not *have* to be successful in all cases, since the pageout scanner
2837 2838 * will catch any pages it misses. It does need to be fast and not introduce
2838 2839 * too much overhead.
2839 2840 *
2840 2841 * If a page isn't found on the unlocked sweep of the page_hash bucket, we
2841 2842 * don't lock and retry. This is ok, since the page scanner will eventually
2842 2843 * find any page we miss in free_vp_pages().
2843 2844 */
2844 2845 void
2845 2846 free_vp_pages(vnode_t *vp, u_offset_t off, size_t len)
2846 2847 {
2847 2848 page_t *pp;
2848 2849 u_offset_t eoff;
2849 2850 extern int swap_in_range(vnode_t *, u_offset_t, size_t);
2850 2851
2851 2852 eoff = off + len;
2852 2853
2853 2854 if (free_pages == 0)
2854 2855 return;
2855 2856 if (swap_in_range(vp, off, len))
2856 2857 return;
2857 2858
2858 2859 for (; off < eoff; off += PAGESIZE) {
2859 2860
2860 2861 /*
2861 2862 * find the page using a fast, but inexact search. It'll be OK
2862 2863 * if a few pages slip through the cracks here.
2863 2864 */
2864 2865 pp = page_exists(vp, off);
2865 2866
2866 2867 /*
2867 2868 * If we didn't find the page (it may not exist), the page
2868 2869 * is free, looks still in use (shared), or we can't lock it,
2869 2870 * just give up.
2870 2871 */
2871 2872 if (pp == NULL ||
2872 2873 PP_ISFREE(pp) ||
2873 2874 page_share_cnt(pp) > 0 ||
2874 2875 !page_trylock(pp, SE_EXCL))
2875 2876 continue;
2876 2877
2877 2878 /*
2878 2879 * Once we have locked pp, verify that it's still the
2879 2880 * correct page and not already free
2880 2881 */
2881 2882 ASSERT(PAGE_LOCKED_SE(pp, SE_EXCL));
2882 2883 if (pp->p_vnode != vp || pp->p_offset != off || PP_ISFREE(pp)) {
2883 2884 page_unlock(pp);
2884 2885 continue;
2885 2886 }
2886 2887
2887 2888 /*
2888 2889 * try to release the page...
2889 2890 */
2890 2891 (void) page_release(pp, 1);
2891 2892 }
2892 2893 }
2893 2894
2894 2895 /*
2895 2896 * Reclaim the given page from the free list.
2896 2897  * If pp is part of a large page, only the given constituent page is reclaimed
2897 2898 * and the large page it belonged to will be demoted. This can only happen
2898 2899 * if the page is not on the cachelist.
2899 2900 *
2900 2901 * Returns 1 on success or 0 on failure.
2901 2902 *
2902 2903 * The page is unlocked if it can't be reclaimed (when freemem == 0).
2903 2904 * If `lock' is non-null, it will be dropped and re-acquired if
2904 2905 * the routine must wait while freemem is 0.
2905 2906 *
2906 2907 * As it turns out, boot_getpages() does this. It picks a page,
2907 2908 * based on where OBP mapped in some address, gets its pfn, searches
2908 2909 * the memsegs, locks the page, then pulls it off the free list!
2909 2910 */
2910 2911 int
2911 2912 page_reclaim(page_t *pp, kmutex_t *lock)
2912 2913 {
2913 2914 struct pcf *p;
2914 2915 struct cpu *cpup;
2915 2916 int enough;
2916 2917 uint_t i;
2917 2918
2918 2919 ASSERT(lock != NULL ? MUTEX_HELD(lock) : 1);
2919 2920 ASSERT(PAGE_EXCL(pp) && PP_ISFREE(pp));
2920 2921
2921 2922 /*
2922 2923 * If `freemem' is 0, we cannot reclaim this page from the
2923 2924 * freelist, so release every lock we might hold: the page,
2924 2925 * and the `lock' before blocking.
2925 2926 *
2926 2927 * The only way `freemem' can become 0 while there are pages
2927 2928 * marked free (have their p->p_free bit set) is when the
2928 2929 * system is low on memory and doing a page_create(). In
2929 2930 * order to guarantee that once page_create() starts acquiring
2930 2931 * pages it will be able to get all that it needs since `freemem'
2931 2932 * was decreased by the requested amount. So, we need to release
2932 2933 * this page, and let page_create() have it.
2933 2934 *
2934 2935 * Since `freemem' being zero is not supposed to happen, just
2935 2936 * use the usual hash stuff as a starting point. If that bucket
2936 2937 * is empty, then assume the worst, and start at the beginning
2937 2938 * of the pcf array. If we always start at the beginning
2938 2939 * when acquiring more than one pcf lock, there won't be any
2939 2940 * deadlock problems.
2940 2941 */
2941 2942
2942 2943 /* TODO: Do we need to test kcage_freemem if PG_NORELOC(pp)? */
2943 2944
2944 2945 if (freemem <= throttlefree && !page_create_throttle(1l, 0)) {
2945 2946 pcf_acquire_all();
2946 2947 goto page_reclaim_nomem;
2947 2948 }
2948 2949
2949 2950 enough = pcf_decrement_bucket(1);
2950 2951
2951 2952 if (!enough) {
2952 2953 VM_STAT_ADD(page_reclaim_zero);
2953 2954 /*
2954 2955  * Check again. It's possible that some other thread
2955 2956 * could have been right behind us, and added one
2956 2957 * to a list somewhere. Acquire each of the pcf locks
2957 2958 * until we find a page.
2958 2959 */
2959 2960 p = pcf;
2960 2961 for (i = 0; i < pcf_fanout; i++) {
2961 2962 mutex_enter(&p->pcf_lock);
2962 2963 if (p->pcf_count >= 1) {
2963 2964 p->pcf_count -= 1;
2964 2965 /*
2965 2966 * freemem is not protected by any lock. Thus,
2966 2967 * we cannot have any assertion containing
2967 2968 * freemem here.
2968 2969 */
2969 2970 freemem -= 1;
2970 2971 enough = 1;
2971 2972 break;
2972 2973 }
2973 2974 p++;
2974 2975 }
2975 2976
2976 2977 if (!enough) {
2977 2978 page_reclaim_nomem:
2978 2979 /*
2979 2980 * We really can't have page `pp'.
2980 2981 * Time for the no-memory dance with
2981 2982 * page_free(). This is just like
2982 2983 * page_create_wait(). Plus the added
2983 2984 * attraction of releasing whatever mutex
2984 2985  * we held when we were called (passed in as `lock').
2985 2986  * Page_unlock() will wake up any thread
2986 2987 * waiting around for this page.
2987 2988 */
2988 2989 if (lock) {
2989 2990 VM_STAT_ADD(page_reclaim_zero_locked);
2990 2991 mutex_exit(lock);
2991 2992 }
2992 2993 page_unlock(pp);
2993 2994
2994 2995 /*
2995 2996 * get this before we drop all the pcf locks.
2996 2997 */
2997 2998 mutex_enter(&new_freemem_lock);
2998 2999
2999 3000 p = pcf;
3000 3001 for (i = 0; i < pcf_fanout; i++) {
3001 3002 p->pcf_wait++;
3002 3003 mutex_exit(&p->pcf_lock);
3003 3004 p++;
3004 3005 }
3005 3006
3006 3007 freemem_wait++;
3007 3008 cv_wait(&freemem_cv, &new_freemem_lock);
3008 3009 freemem_wait--;
3009 3010
3010 3011 mutex_exit(&new_freemem_lock);
3011 3012
3012 3013 if (lock) {
3013 3014 mutex_enter(lock);
3014 3015 }
3015 3016 return (0);
3016 3017 }
3017 3018
3018 3019 /*
3019 3020 * The pcf accounting has been done,
3020 3021 * though none of the pcf_wait flags have been set,
3021 3022 * drop the locks and continue on.
3022 3023 */
3023 3024 while (p >= pcf) {
3024 3025 mutex_exit(&p->pcf_lock);
3025 3026 p--;
3026 3027 }
3027 3028 }
3028 3029
3029 3030
3030 3031 VM_STAT_ADD(pagecnt.pc_reclaim);
3031 3032
3032 3033 /*
3033 3034 * page_list_sub will handle the case where pp is a large page.
3034 3035 * It's possible that the page was promoted while on the freelist
3035 3036 */
3036 3037 if (PP_ISAGED(pp)) {
3037 3038 page_list_sub(pp, PG_FREE_LIST);
3038 3039 TRACE_1(TR_FAC_VM, TR_PAGE_UNFREE_FREE,
3039 3040 "page_reclaim_free:pp %p", pp);
3040 3041 } else {
3041 3042 page_list_sub(pp, PG_CACHE_LIST);
3042 3043 TRACE_1(TR_FAC_VM, TR_PAGE_UNFREE_CACHE,
3043 3044 "page_reclaim_cache:pp %p", pp);
3044 3045 }
3045 3046
3046 3047 /*
3047 3048 * clear the p_free & p_age bits since this page is no longer
3048 3049  * on the free list. Notice that there was a brief time when
3049 3050  * the page was marked as free, but was not on the list.
3050 3051 *
3051 3052 * Set the reference bit to protect against immediate pageout.
3052 3053 */
3053 3054 PP_CLRFREE(pp);
3054 3055 PP_CLRAGED(pp);
3055 3056 page_set_props(pp, P_REF);
3056 3057
3057 3058 CPU_STATS_ENTER_K();
3058 3059 cpup = CPU; /* get cpup now that CPU cannot change */
3059 3060 CPU_STATS_ADDQ(cpup, vm, pgrec, 1);
3060 3061 CPU_STATS_ADDQ(cpup, vm, pgfrec, 1);
3061 3062 CPU_STATS_EXIT_K();
3062 3063 ASSERT(pp->p_szc == 0);
3063 3064
3064 3065 return (1);
3065 3066 }
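
The failure path described above (freemem exhausted, page lock dropped) means
a caller must not touch pp again after a failed page_reclaim(); it has to
restart its lookup. A minimal hypothetical sketch of that convention (the
helper name is invented, not part of this file):

	static int
	example_take_free_page(page_t *pp)
	{
		/* pp: a page believed to be on the free or cache list */
		if (!page_trylock(pp, SE_EXCL))
			return (0);
		if (!PP_ISFREE(pp)) {
			/* someone else reclaimed it first */
			page_unlock(pp);
			return (0);
		}
		if (!page_reclaim(pp, NULL)) {
			/* freemem was 0; page_reclaim() dropped the lock */
			return (0);
		}
		/* success: pp is off the free list, still SE_EXCL locked */
		return (1);
	}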
3066 3067
3067 3068 /*
3068 3069 * Destroy identity of the page and put it back on
3069 3070 * the page free list. Assumes that the caller has
3070 3071 * acquired the "exclusive" lock on the page.
3071 3072 */
3072 3073 void
3073 3074 page_destroy(page_t *pp, int dontfree)
3074 3075 {
3075 3076 ASSERT((PAGE_EXCL(pp) &&
3076 3077 !page_iolock_assert(pp)) || panicstr);
3077 3078 ASSERT(pp->p_slckcnt == 0 || panicstr);
3078 3079
3079 3080 if (pp->p_szc != 0) {
3080 3081 if (pp->p_vnode == NULL || IS_SWAPFSVP(pp->p_vnode) ||
3081 3082 PP_ISKAS(pp)) {
3082 3083 panic("page_destroy: anon or kernel or no vnode "
3083 3084 "large page %p", (void *)pp);
3084 3085 }
3085 3086 page_demote_vp_pages(pp);
3086 3087 ASSERT(pp->p_szc == 0);
3087 3088 }
3088 3089
3089 3090 TRACE_1(TR_FAC_VM, TR_PAGE_DESTROY, "page_destroy:pp %p", pp);
3090 3091
3091 3092 /*
3092 3093 * Unload translations, if any, then hash out the
3093 3094 * page to erase its identity.
3094 3095 */
3095 3096 (void) hat_pageunload(pp, HAT_FORCE_PGUNLOAD);
3096 3097 page_hashout(pp, NULL);
3097 3098
3098 3099 if (!dontfree) {
3099 3100 /*
3100 3101 * Acquire the "freemem_lock" for availrmem.
3101 3102 * The page_struct_lock need not be acquired for lckcnt
3102 3103 * and cowcnt since the page has an "exclusive" lock.
3103 3104 * We are doing a modified version of page_pp_unlock here.
3104 3105 */
3105 3106 if ((pp->p_lckcnt != 0) || (pp->p_cowcnt != 0)) {
3106 3107 mutex_enter(&freemem_lock);
3107 3108 if (pp->p_lckcnt != 0) {
3108 3109 availrmem++;
3109 3110 pages_locked--;
3110 3111 pp->p_lckcnt = 0;
3111 3112 }
3112 3113 if (pp->p_cowcnt != 0) {
3113 3114 availrmem += pp->p_cowcnt;
3114 3115 pages_locked -= pp->p_cowcnt;
3115 3116 pp->p_cowcnt = 0;
3116 3117 }
3117 3118 mutex_exit(&freemem_lock);
3118 3119 }
3119 3120 /*
3120 3121 * Put the page on the "free" list.
3121 3122 */
3122 3123 page_free(pp, 0);
3123 3124 }
3124 3125 }
3125 3126
3126 3127 void
3127 3128 page_destroy_pages(page_t *pp)
3128 3129 {
3129 3130
3130 3131 page_t *tpp, *rootpp = NULL;
3131 3132 pgcnt_t pgcnt = page_get_pagecnt(pp->p_szc);
3132 3133 pgcnt_t i, pglcks = 0;
3133 3134 uint_t szc = pp->p_szc;
3134 3135
3135 3136 ASSERT(pp->p_szc != 0 && pp->p_szc < page_num_pagesizes());
3136 3137
3137 3138 VM_STAT_ADD(pagecnt.pc_destroy_pages);
3138 3139
3139 3140 TRACE_1(TR_FAC_VM, TR_PAGE_DESTROY, "page_destroy_pages:pp %p", pp);
3140 3141
3141 3142 if ((page_pptonum(pp) & (pgcnt - 1)) != 0) {
3142 3143 panic("page_destroy_pages: not root page %p", (void *)pp);
3143 3144 /*NOTREACHED*/
3144 3145 }
3145 3146
3146 3147 for (i = 0, tpp = pp; i < pgcnt; i++, tpp++) {
3147 3148 ASSERT((PAGE_EXCL(tpp) &&
3148 3149 !page_iolock_assert(tpp)) || panicstr);
3149 3150 ASSERT(tpp->p_slckcnt == 0 || panicstr);
3150 3151 (void) hat_pageunload(tpp, HAT_FORCE_PGUNLOAD);
3151 3152 page_hashout(tpp, NULL);
3152 3153 ASSERT(tpp->p_offset == (u_offset_t)-1);
3153 3154 if (tpp->p_lckcnt != 0) {
3154 3155 pglcks++;
3155 3156 tpp->p_lckcnt = 0;
3156 3157 } else if (tpp->p_cowcnt != 0) {
3157 3158 pglcks += tpp->p_cowcnt;
3158 3159 tpp->p_cowcnt = 0;
3159 3160 }
3160 3161 ASSERT(!hat_page_getshare(tpp));
3161 3162 ASSERT(tpp->p_vnode == NULL);
3162 3163 ASSERT(tpp->p_szc == szc);
3163 3164
3164 3165 PP_SETFREE(tpp);
3165 3166 page_clr_all_props(tpp);
3166 3167 PP_SETAGED(tpp);
3167 3168 ASSERT(tpp->p_next == tpp);
3168 3169 ASSERT(tpp->p_prev == tpp);
3169 3170 page_list_concat(&rootpp, &tpp);
3170 3171 }
3171 3172
3172 3173 ASSERT(rootpp == pp);
3173 3174 if (pglcks != 0) {
3174 3175 mutex_enter(&freemem_lock);
3175 3176 availrmem += pglcks;
3176 3177 mutex_exit(&freemem_lock);
3177 3178 }
3178 3179
3179 3180 page_list_add_pages(rootpp, 0);
3180 3181 page_create_putback(pgcnt);
3181 3182 }
3182 3183
3183 3184 /*
3184 3185 * Similar to page_destroy(), but destroys pages which are
3185 3186 * locked and known to be on the page free list. Since
3186 3187 * the page is known to be free and locked, no one can access
3187 3188 * it.
3188 3189 *
3189 3190 * Also, the number of free pages does not change.
3190 3191 */
3191 3192 void
3192 3193 page_destroy_free(page_t *pp)
3193 3194 {
3194 3195 ASSERT(PAGE_EXCL(pp));
3195 3196 ASSERT(PP_ISFREE(pp));
3196 3197 ASSERT(pp->p_vnode);
3197 3198 ASSERT(hat_page_getattr(pp, P_MOD | P_REF | P_RO) == 0);
3198 3199 ASSERT(!hat_page_is_mapped(pp));
3199 3200 ASSERT(PP_ISAGED(pp) == 0);
3200 3201 ASSERT(pp->p_szc == 0);
3201 3202
3202 3203 VM_STAT_ADD(pagecnt.pc_destroy_free);
3203 3204 page_list_sub(pp, PG_CACHE_LIST);
3204 3205
3205 3206 page_hashout(pp, NULL);
3206 3207 ASSERT(pp->p_vnode == NULL);
3207 3208 ASSERT(pp->p_offset == (u_offset_t)-1);
3208 3209 ASSERT(pp->p_hash == NULL);
3209 3210
3210 3211 PP_SETAGED(pp);
3211 3212 page_list_add(pp, PG_FREE_LIST | PG_LIST_TAIL);
3212 3213 page_unlock(pp);
3213 3214
3214 3215 mutex_enter(&new_freemem_lock);
3215 3216 if (freemem_wait) {
3216 3217 cv_signal(&freemem_cv);
3217 3218 }
3218 3219 mutex_exit(&new_freemem_lock);
3219 3220 }
3220 3221
3221 3222 /*
3222 3223 * Rename the page "opp" to have an identity specified
3223 3224 * by [vp, off]. If a page already exists with this name
3224 3225 * it is locked and destroyed. Note that the page's
3225 3226 * translations are not unloaded during the rename.
3226 3227 *
3227 3228 * This routine is used by the anon layer to "steal" the
3228 3229 * original page and is not unlike destroying a page and
3229 3230 * creating a new page using the same page frame.
3230 3231 *
3231 3232 * XXX -- Could deadlock if caller 1 tries to rename A to B while
3232 3233 * caller 2 tries to rename B to A.
3233 3234 */
3234 3235 void
3235 3236 page_rename(page_t *opp, vnode_t *vp, u_offset_t off)
3236 3237 {
3237 3238 page_t *pp;
3238 3239 int olckcnt = 0;
3239 3240 int ocowcnt = 0;
3240 3241 kmutex_t *phm;
3241 3242 ulong_t index;
3242 3243
3243 3244 ASSERT(PAGE_EXCL(opp) && !page_iolock_assert(opp));
3244 3245 ASSERT(MUTEX_NOT_HELD(page_vnode_mutex(vp)));
3245 3246 ASSERT(PP_ISFREE(opp) == 0);
3246 3247
3247 3248 VM_STAT_ADD(page_rename_count);
3248 3249
3249 3250 TRACE_3(TR_FAC_VM, TR_PAGE_RENAME,
3250 3251 "page rename:pp %p vp %p off %llx", opp, vp, off);
3251 3252
3252 3253 /*
3253 3254 * CacheFS may call page_rename for a large NFS page
3254 3255 * when both CacheFS and NFS mount points are used
3255 3256 * by applications. Demote this large page before
3256 3257 * renaming it, to ensure that there are no "partial"
3257 3258 * large pages left lying around.
3258 3259 */
3259 3260 if (opp->p_szc != 0) {
3260 3261 vnode_t *ovp = opp->p_vnode;
3261 3262 ASSERT(ovp != NULL);
3262 3263 ASSERT(!IS_SWAPFSVP(ovp));
3263 3264 ASSERT(!VN_ISKAS(ovp));
3264 3265 page_demote_vp_pages(opp);
3265 3266 ASSERT(opp->p_szc == 0);
3266 3267 }
3267 3268
3268 3269 page_hashout(opp, NULL);
3269 3270 PP_CLRAGED(opp);
3270 3271
3271 3272 /*
3272 3273 * Acquire the appropriate page hash lock, since
3273 3274 * we're going to rename the page.
3274 3275 */
3275 3276 index = PAGE_HASH_FUNC(vp, off);
3276 3277 phm = PAGE_HASH_MUTEX(index);
3277 3278 mutex_enter(phm);
3278 3279 top:
3279 3280 /*
3280 3281 * Look for an existing page with this name and destroy it if found.
3281 3282 * By holding the page hash lock all the way to the page_hashin()
3282 3283 * call, we are assured that no page can be created with this
3283 3284 * identity. In the case when the phm lock is dropped to undo any
3284 3285 * hat layer mappings, the existing page is held with an "exclusive"
3285 3286 * lock, again preventing another page from being created with
3286 3287 * this identity.
3287 3288 */
3288 3289 pp = page_hash_search(index, vp, off);
3289 3290 if (pp != NULL) {
3290 3291 VM_STAT_ADD(page_rename_exists);
3291 3292
3292 3293 /*
3293 3294 * As it turns out, this is one of only two places where
3294 3295 * page_lock() needs to hold the passed in lock in the
3295 3296 * successful case. In all of the others, the lock could
3296 3297 * be dropped as soon as the attempt is made to lock
3297 3298  * the page. It is tempting to add yet another argument,
3298 3299 * PL_KEEP or PL_DROP, to let page_lock know what to do.
3299 3300 */
3300 3301 if (!page_lock(pp, SE_EXCL, phm, P_RECLAIM)) {
3301 3302 /*
3302 3303 * Went to sleep because the page could not
3303 3304 * be locked. We were woken up when the page
3304 3305 * was unlocked, or when the page was destroyed.
3305 3306 * In either case, `phm' was dropped while we
3306 3307 * slept. Hence we should not just roar through
3307 3308 * this loop.
3308 3309 */
3309 3310 goto top;
3310 3311 }
3311 3312
3312 3313 /*
3313 3314 * If an existing page is a large page, then demote
3314 3315 * it to ensure that no "partial" large pages are
3315 3316 * "created" after page_rename. An existing page
3316 3317 * can be a CacheFS page, and can't belong to swapfs.
3317 3318 */
3318 3319 if (hat_page_is_mapped(pp)) {
3319 3320 /*
3320 3321 * Unload translations. Since we hold the
3321 3322 * exclusive lock on this page, the page
3322 3323 * can not be changed while we drop phm.
3323 3324 * This is also not a lock protocol violation,
3324 3325 * but rather the proper way to do things.
3325 3326 */
3326 3327 mutex_exit(phm);
3327 3328 (void) hat_pageunload(pp, HAT_FORCE_PGUNLOAD);
3328 3329 if (pp->p_szc != 0) {
3329 3330 ASSERT(!IS_SWAPFSVP(vp));
3330 3331 ASSERT(!VN_ISKAS(vp));
3331 3332 page_demote_vp_pages(pp);
3332 3333 ASSERT(pp->p_szc == 0);
3333 3334 }
3334 3335 mutex_enter(phm);
3335 3336 } else if (pp->p_szc != 0) {
3336 3337 ASSERT(!IS_SWAPFSVP(vp));
3337 3338 ASSERT(!VN_ISKAS(vp));
3338 3339 mutex_exit(phm);
3339 3340 page_demote_vp_pages(pp);
3340 3341 ASSERT(pp->p_szc == 0);
3341 3342 mutex_enter(phm);
3342 3343 }
3343 3344 page_hashout(pp, phm);
3344 3345 }
3345 3346 /*
3346 3347 * Hash in the page with the new identity.
3347 3348 */
3348 3349 if (!page_hashin(opp, vp, off, phm)) {
3349 3350 /*
3350 3351 * We were holding phm while we searched for [vp, off]
3351 3352 * and only dropped phm if we found and locked a page.
3352 3353  * If we can't create this page now, then something
3353 3354 * is really broken.
3354 3355 */
3355 3356 panic("page_rename: Can't hash in page: %p", (void *)pp);
3356 3357 /*NOTREACHED*/
3357 3358 }
3358 3359
3359 3360 ASSERT(MUTEX_HELD(phm));
3360 3361 mutex_exit(phm);
3361 3362
3362 3363 /*
3363 3364 * Now that we have dropped phm, lets get around to finishing up
3364 3365 * with pp.
3365 3366 */
3366 3367 if (pp != NULL) {
3367 3368 ASSERT(!hat_page_is_mapped(pp));
3368 3369 /* for now large pages should not end up here */
3369 3370 ASSERT(pp->p_szc == 0);
3370 3371 /*
3371 3372 * Save the locks for transfer to the new page and then
3372 3373 * clear them so page_free doesn't think they're important.
3373 3374 * The page_struct_lock need not be acquired for lckcnt and
3374 3375 * cowcnt since the page has an "exclusive" lock.
3375 3376 */
3376 3377 olckcnt = pp->p_lckcnt;
3377 3378 ocowcnt = pp->p_cowcnt;
3378 3379 pp->p_lckcnt = pp->p_cowcnt = 0;
3379 3380
3380 3381 /*
3381 3382 * Put the page on the "free" list after we drop
3382 3383 * the lock. The less work under the lock the better.
3383 3384 */
3384 3385 /*LINTED: constant in conditional context*/
3385 3386 VN_DISPOSE(pp, B_FREE, 0, kcred);
3386 3387 }
3387 3388
3388 3389 /*
3389 3390 * Transfer the lock count from the old page (if any).
3390 3391 * The page_struct_lock need not be acquired for lckcnt and
3391 3392 * cowcnt since the page has an "exclusive" lock.
3392 3393 */
3393 3394 opp->p_lckcnt += olckcnt;
3394 3395 opp->p_cowcnt += ocowcnt;
3395 3396 }
3396 3397
3397 3398 /*
3398 3399  * Low-level routine to add page `pp' to the hash and vp chains for [vp, offset].
3399 3400 *
3400 3401 * Pages are normally inserted at the start of a vnode's v_pages list.
3401 3402 * If the vnode is VMODSORT and the page is modified, it goes at the end.
3402 3403 * This can happen when a modified page is relocated for DR.
3403 3404 *
3404 3405 * Returns 1 on success and 0 on failure.
3405 3406 */
3406 3407 static int
3407 3408 page_do_hashin(page_t *pp, vnode_t *vp, u_offset_t offset)
3408 3409 {
3409 3410 page_t **listp;
3410 3411 page_t *tp;
3411 3412 ulong_t index;
3412 3413
3413 3414 ASSERT(PAGE_EXCL(pp));
3414 3415 ASSERT(vp != NULL);
3415 3416 ASSERT(MUTEX_HELD(page_vnode_mutex(vp)));
3416 3417
3417 3418 /*
3418 3419 * Be sure to set these up before the page is inserted on the hash
3419 3420 * list. As soon as the page is placed on the list some other
3420 3421 * thread might get confused and wonder how this page could
3421 3422 * possibly hash to this list.
3422 3423 */
3423 3424 pp->p_vnode = vp;
3424 3425 pp->p_offset = offset;
3425 3426
3426 3427 /*
3427 3428 * record if this page is on a swap vnode
3428 3429 */
3429 3430 if ((vp->v_flag & VISSWAP) != 0)
3430 3431 PP_SETSWAP(pp);
3431 3432
3432 3433 index = PAGE_HASH_FUNC(vp, offset);
3433 3434 ASSERT(MUTEX_HELD(PAGE_HASH_MUTEX(index)));
3434 3435 listp = &page_hash[index];
3435 3436
3436 3437 /*
3437 3438 * If this page is already hashed in, fail this attempt to add it.
3438 3439 */
3439 3440 for (tp = *listp; tp != NULL; tp = tp->p_hash) {
3440 3441 if (tp->p_vnode == vp && tp->p_offset == offset) {
3441 3442 pp->p_vnode = NULL;
3442 3443 pp->p_offset = (u_offset_t)(-1);
3443 3444 return (0);
3444 3445 }
3445 3446 }
3446 3447 pp->p_hash = *listp;
3447 3448 *listp = pp;
3448 3449
3449 3450 /*
3450 3451 * Add the page to the vnode's list of pages
3451 3452 */
3452 3453 if (vp->v_pages != NULL && IS_VMODSORT(vp) && hat_ismod(pp))
3453 3454 listp = &vp->v_pages->p_vpprev->p_vpnext;
3454 3455 else
3455 3456 listp = &vp->v_pages;
3456 3457
3457 3458 page_vpadd(listp, pp);
3458 3459
3459 3460 return (1);
3460 3461 }
3461 3462
3462 3463 /*
3463 3464 * Add page `pp' to both the hash and vp chains for [vp, offset].
3464 3465 *
3465 3466 * Returns 1 on success and 0 on failure.
3466 3467 * If hold is passed in, it is not dropped.
3467 3468 */
3468 3469 int
3469 3470 page_hashin(page_t *pp, vnode_t *vp, u_offset_t offset, kmutex_t *hold)
3470 3471 {
3471 3472 kmutex_t *phm = NULL;
3472 3473 kmutex_t *vphm;
3473 3474 int rc;
3474 3475
3475 3476 ASSERT(MUTEX_NOT_HELD(page_vnode_mutex(vp)));
3476 3477 ASSERT(pp->p_fsdata == 0 || panicstr);
3477 3478
3478 3479 TRACE_3(TR_FAC_VM, TR_PAGE_HASHIN,
3479 3480 "page_hashin:pp %p vp %p offset %llx",
3480 3481 pp, vp, offset);
3481 3482
3482 3483 VM_STAT_ADD(hashin_count);
3483 3484
3484 3485 if (hold != NULL)
3485 3486 phm = hold;
3486 3487 else {
3487 3488 VM_STAT_ADD(hashin_not_held);
3488 3489 phm = PAGE_HASH_MUTEX(PAGE_HASH_FUNC(vp, offset));
3489 3490 mutex_enter(phm);
3490 3491 }
3491 3492
3492 3493 vphm = page_vnode_mutex(vp);
3493 3494 mutex_enter(vphm);
3494 3495 rc = page_do_hashin(pp, vp, offset);
3495 3496 mutex_exit(vphm);
3496 3497 if (hold == NULL)
3497 3498 mutex_exit(phm);
3498 3499 if (rc == 0)
3499 3500 VM_STAT_ADD(hashin_already);
3500 3501 return (rc);
3501 3502 }
3502 3503
3503 3504 /*
3504 3505 * Remove page ``pp'' from the hash and vp chains and remove vp association.
3505 3506 * All mutexes must be held
3506 3507 */
3507 3508 static void
3508 3509 page_do_hashout(page_t *pp)
3509 3510 {
3510 3511 page_t **hpp;
3511 3512 page_t *hp;
3512 3513 vnode_t *vp = pp->p_vnode;
3513 3514
3514 3515 ASSERT(vp != NULL);
3515 3516 ASSERT(MUTEX_HELD(page_vnode_mutex(vp)));
3516 3517
3517 3518 /*
3518 3519 * First, take pp off of its hash chain.
3519 3520 */
3520 3521 hpp = &page_hash[PAGE_HASH_FUNC(vp, pp->p_offset)];
3521 3522
3522 3523 for (;;) {
3523 3524 hp = *hpp;
3524 3525 if (hp == pp)
3525 3526 break;
3526 3527 if (hp == NULL) {
3527 3528 panic("page_do_hashout");
3528 3529 /*NOTREACHED*/
3529 3530 }
3530 3531 hpp = &hp->p_hash;
3531 3532 }
3532 3533 *hpp = pp->p_hash;
3533 3534
3534 3535 /*
3535 3536 * Now remove it from its associated vnode.
3536 3537 */
3537 3538 if (vp->v_pages)
3538 3539 page_vpsub(&vp->v_pages, pp);
3539 3540
3540 3541 pp->p_hash = NULL;
3541 3542 page_clr_all_props(pp);
3542 3543 PP_CLRSWAP(pp);
3543 3544 pp->p_vnode = NULL;
3544 3545 pp->p_offset = (u_offset_t)-1;
3545 3546 pp->p_fsdata = 0;
3546 3547 }
3547 3548
3548 3549 /*
3549 3550 * Remove page ``pp'' from the hash and vp chains and remove vp association.
3550 3551 *
3551 3552 * When `phm' is non-NULL it contains the address of the mutex protecting the
3552 3553 * hash list pp is on. It is not dropped.
3553 3554 */
3554 3555 void
3555 3556 page_hashout(page_t *pp, kmutex_t *phm)
3556 3557 {
3557 3558 vnode_t *vp;
3558 3559 ulong_t index;
3559 3560 kmutex_t *nphm;
3560 3561 kmutex_t *vphm;
3561 3562 kmutex_t *sep;
3562 3563
3563 3564 ASSERT(phm != NULL ? MUTEX_HELD(phm) : 1);
3564 3565 ASSERT(pp->p_vnode != NULL);
3565 3566 ASSERT((PAGE_EXCL(pp) && !page_iolock_assert(pp)) || panicstr);
3566 3567 ASSERT(MUTEX_NOT_HELD(page_vnode_mutex(pp->p_vnode)));
3567 3568
3568 3569 vp = pp->p_vnode;
3569 3570
3570 3571 TRACE_2(TR_FAC_VM, TR_PAGE_HASHOUT,
3571 3572 "page_hashout:pp %p vp %p", pp, vp);
3572 3573
3573 3574 /* Kernel probe */
3574 3575 TNF_PROBE_2(page_unmap, "vm pagefault", /* CSTYLED */,
3575 3576 tnf_opaque, vnode, vp,
3576 3577 tnf_offset, offset, pp->p_offset);
3577 3578
3578 3579 /*
3579 3580	 * Take the hash bucket mutex for [vp, offset] unless the caller passed one in.
3580 3581 */
3581 3582 VM_STAT_ADD(hashout_count);
3582 3583 index = PAGE_HASH_FUNC(vp, pp->p_offset);
3583 3584 if (phm == NULL) {
3584 3585 VM_STAT_ADD(hashout_not_held);
3585 3586 nphm = PAGE_HASH_MUTEX(index);
3586 3587 mutex_enter(nphm);
3587 3588 }
3588 3589 ASSERT(phm ? phm == PAGE_HASH_MUTEX(index) : 1);
3589 3590
3590 3591
3591 3592 /*
3592 3593 * grab page vnode mutex and remove it...
3593 3594 */
3594 3595 vphm = page_vnode_mutex(vp);
3595 3596 mutex_enter(vphm);
3596 3597
3597 3598 page_do_hashout(pp);
3598 3599
3599 3600 mutex_exit(vphm);
3600 3601 if (phm == NULL)
3601 3602 mutex_exit(nphm);
3602 3603
3603 3604 /*
3604 3605 * Wake up processes waiting for this page. The page's
3605 3606 * identity has been changed, and is probably not the
3606 3607 * desired page any longer.
3607 3608 */
3608 3609 sep = page_se_mutex(pp);
3609 3610 mutex_enter(sep);
3610 3611 pp->p_selock &= ~SE_EWANTED;
3611 3612 if (CV_HAS_WAITERS(&pp->p_cv))
3612 3613 cv_broadcast(&pp->p_cv);
3613 3614 mutex_exit(sep);
3614 3615 }
3615 3616
3616 3617 /*
3617 3618 * Add the page to the front of a linked list of pages
3618 3619 * using the p_next & p_prev pointers for the list.
3619 3620 * The caller is responsible for protecting the list pointers.
3620 3621 */
3621 3622 void
3622 3623 page_add(page_t **ppp, page_t *pp)
3623 3624 {
3624 3625 ASSERT(PAGE_EXCL(pp) || (PAGE_SHARED(pp) && page_iolock_assert(pp)));
3625 3626
3626 3627 page_add_common(ppp, pp);
3627 3628 }
3628 3629
3629 3630
3630 3631
3631 3632 /*
3632 3633 * Common code for page_add() and mach_page_add()
3633 3634 */
3634 3635 void
3635 3636 page_add_common(page_t **ppp, page_t *pp)
3636 3637 {
3637 3638 if (*ppp == NULL) {
3638 3639 pp->p_next = pp->p_prev = pp;
3639 3640 } else {
3640 3641 pp->p_next = *ppp;
3641 3642 pp->p_prev = (*ppp)->p_prev;
3642 3643 (*ppp)->p_prev = pp;
3643 3644 pp->p_prev->p_next = pp;
3644 3645 }
3645 3646 *ppp = pp;
3646 3647 }
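
The circular, doubly linked insert performed by page_add_common() above is worth seeing in isolation. The following stand-alone user-space sketch uses a hypothetical struct node in place of page_t and a made-up function name; it illustrates the same pointer manipulation and is not part of the kernel source.

#include <stddef.h>

struct node {
	struct node *next;	/* stands in for p_next */
	struct node *prev;	/* stands in for p_prev */
};

/* Insert n at the front of the circular list headed by *headp. */
void
list_add_front(struct node **headp, struct node *n)
{
	if (*headp == NULL) {
		n->next = n->prev = n;		/* n becomes a list of one */
	} else {
		n->next = *headp;		/* old head follows n */
		n->prev = (*headp)->prev;	/* old tail precedes n */
		(*headp)->prev = n;
		n->prev->next = n;		/* old tail now points at n */
	}
	*headp = n;				/* n is the new head */
}
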
3647 3648
3648 3649
3649 3650 /*
3650 3651 * Remove this page from a linked list of pages
3651 3652 * using the p_next & p_prev pointers for the list.
3652 3653 *
3653 3654 * The caller is responsible for protecting the list pointers.
3654 3655 */
3655 3656 void
3656 3657 page_sub(page_t **ppp, page_t *pp)
3657 3658 {
3658 3659 ASSERT((PP_ISFREE(pp)) ? 1 :
3659 3660 (PAGE_EXCL(pp)) || (PAGE_SHARED(pp) && page_iolock_assert(pp)));
3660 3661
3661 3662 if (*ppp == NULL || pp == NULL) {
3662 3663 panic("page_sub: bad arg(s): pp %p, *ppp %p",
3663 3664 (void *)pp, (void *)(*ppp));
3664 3665 /*NOTREACHED*/
3665 3666 }
3666 3667
3667 3668 page_sub_common(ppp, pp);
3668 3669 }
3669 3670
3670 3671
3671 3672 /*
3672 3673 * Common code for page_sub() and mach_page_sub()
3673 3674 */
3674 3675 void
3675 3676 page_sub_common(page_t **ppp, page_t *pp)
3676 3677 {
3677 3678 if (*ppp == pp)
3678 3679 *ppp = pp->p_next; /* go to next page */
3679 3680
3680 3681 if (*ppp == pp)
3681 3682 *ppp = NULL; /* page list is gone */
3682 3683 else {
3683 3684 pp->p_prev->p_next = pp->p_next;
3684 3685 pp->p_next->p_prev = pp->p_prev;
3685 3686 }
3686 3687 pp->p_prev = pp->p_next = pp; /* make pp a list of one */
3687 3688 }
3688 3689
3689 3690
3690 3691 /*
3691 3692 * Break page list cppp into two lists with npages in the first list.
3692 3693 * The tail is returned in nppp.
3693 3694 */
3694 3695 void
3695 3696 page_list_break(page_t **oppp, page_t **nppp, pgcnt_t npages)
3696 3697 {
3697 3698 page_t *s1pp = *oppp;
3698 3699 page_t *s2pp;
3699 3700 page_t *e1pp, *e2pp;
3700 3701 long n = 0;
3701 3702
3702 3703 if (s1pp == NULL) {
3703 3704 *nppp = NULL;
3704 3705 return;
3705 3706 }
3706 3707 if (npages == 0) {
3707 3708 *nppp = s1pp;
3708 3709 *oppp = NULL;
3709 3710 return;
3710 3711 }
3711 3712 for (n = 0, s2pp = *oppp; n < npages; n++) {
3712 3713 s2pp = s2pp->p_next;
3713 3714 }
3714 3715 /* Fix head and tail of new lists */
3715 3716 e1pp = s2pp->p_prev;
3716 3717 e2pp = s1pp->p_prev;
3717 3718 s1pp->p_prev = e1pp;
3718 3719 e1pp->p_next = s1pp;
3719 3720 s2pp->p_prev = e2pp;
3720 3721 e2pp->p_next = s2pp;
3721 3722
3722 3723 /* second list empty */
3723 3724 if (s2pp == s1pp) {
3724 3725 *oppp = s1pp;
3725 3726 *nppp = NULL;
3726 3727 } else {
3727 3728 *oppp = s1pp;
3728 3729 *nppp = s2pp;
3729 3730 }
3730 3731 }
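
The re-stitching of p_prev/p_next in page_list_break() can be hard to follow; below is a stand-alone sketch of the same split on a plain circular list. The struct node type and list_break() name are hypothetical, and e1/e2 play the roles of e1pp/e2pp above.

#include <stddef.h>

struct node {
	struct node *next;
	struct node *prev;
};

/*
 * Split the circular list headed by *oldp: the first n nodes remain on
 * *oldp and the remainder is returned in *newp.
 */
void
list_break(struct node **oldp, struct node **newp, size_t n)
{
	struct node *s1 = *oldp;	/* head of the first list */
	struct node *s2;		/* head of the second list */
	struct node *e1, *e2;		/* tails of the first and second lists */
	size_t i;

	if (s1 == NULL) {
		*newp = NULL;
		return;
	}
	if (n == 0) {
		*newp = s1;		/* everything moves to the new list */
		*oldp = NULL;
		return;
	}
	for (i = 0, s2 = s1; i < n; i++)
		s2 = s2->next;		/* s2 is the first node of the tail */

	e1 = s2->prev;			/* last node kept on the first list */
	e2 = s1->prev;			/* old overall tail */
	s1->prev = e1;
	e1->next = s1;			/* close the first circle */
	s2->prev = e2;
	e2->next = s2;			/* close the second circle */

	*newp = (s2 == s1) ? NULL : s2;	/* second list empty if n wrapped */
}
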
3731 3732
3732 3733 /*
3733 3734 * Concatenate page list nppp onto the end of list ppp.
3734 3735 */
3735 3736 void
3736 3737 page_list_concat(page_t **ppp, page_t **nppp)
3737 3738 {
3738 3739 page_t *s1pp, *s2pp, *e1pp, *e2pp;
3739 3740
3740 3741 if (*nppp == NULL) {
3741 3742 return;
3742 3743 }
3743 3744 if (*ppp == NULL) {
3744 3745 *ppp = *nppp;
3745 3746 return;
3746 3747 }
3747 3748 s1pp = *ppp;
3748 3749 e1pp = s1pp->p_prev;
3749 3750 s2pp = *nppp;
3750 3751 e2pp = s2pp->p_prev;
3751 3752 s1pp->p_prev = e2pp;
3752 3753 e2pp->p_next = s1pp;
3753 3754 e1pp->p_next = s2pp;
3754 3755 s2pp->p_prev = e1pp;
3755 3756 }
3756 3757
3757 3758 /*
3758 3759 * return the next page in the page list
3759 3760 */
3760 3761 page_t *
3761 3762 page_list_next(page_t *pp)
3762 3763 {
3763 3764 return (pp->p_next);
3764 3765 }
3765 3766
3766 3767
3767 3768 /*
3768 3769 * Add the page to the front of the linked list of pages
3769 3770 * using p_vpnext/p_vpprev pointers for the list.
3770 3771 *
3771 3772 * The caller is responsible for protecting the lists.
3772 3773 */
3773 3774 void
3774 3775 page_vpadd(page_t **ppp, page_t *pp)
3775 3776 {
3776 3777 if (*ppp == NULL) {
3777 3778 pp->p_vpnext = pp->p_vpprev = pp;
3778 3779 } else {
3779 3780 pp->p_vpnext = *ppp;
3780 3781 pp->p_vpprev = (*ppp)->p_vpprev;
3781 3782 (*ppp)->p_vpprev = pp;
3782 3783 pp->p_vpprev->p_vpnext = pp;
3783 3784 }
3784 3785 *ppp = pp;
3785 3786 }
3786 3787
3787 3788 /*
3788 3789 * Remove this page from the linked list of pages
3789 3790 * using p_vpnext/p_vpprev pointers for the list.
3790 3791 *
3791 3792 * The caller is responsible for protecting the lists.
3792 3793 */
3793 3794 void
3794 3795 page_vpsub(page_t **ppp, page_t *pp)
3795 3796 {
3796 3797 if (*ppp == NULL || pp == NULL) {
3797 3798 panic("page_vpsub: bad arg(s): pp %p, *ppp %p",
3798 3799 (void *)pp, (void *)(*ppp));
3799 3800 /*NOTREACHED*/
3800 3801 }
3801 3802
3802 3803 if (*ppp == pp)
3803 3804 *ppp = pp->p_vpnext; /* go to next page */
3804 3805
3805 3806 if (*ppp == pp)
3806 3807 *ppp = NULL; /* page list is gone */
3807 3808 else {
3808 3809 pp->p_vpprev->p_vpnext = pp->p_vpnext;
3809 3810 pp->p_vpnext->p_vpprev = pp->p_vpprev;
3810 3811 }
3811 3812 pp->p_vpprev = pp->p_vpnext = pp; /* make pp a list of one */
3812 3813 }
3813 3814
3814 3815 /*
3815 3816 * Lock a physical page into memory "long term". Used to support "lock
3816 3817 * in memory" functions. Accepts the page to be locked, and a cow variable
3817 3818	 * to indicate whether the lock will travel to the new page during
3818 3819 * a potential copy-on-write.
3819 3820 */
3820 3821 int
3821 3822 page_pp_lock(
3822 3823 page_t *pp, /* page to be locked */
3823 3824 int cow, /* cow lock */
3824 3825 int kernel) /* must succeed -- ignore checking */
3825 3826 {
3826 3827 int r = 0; /* result -- assume failure */
3827 3828
3828 3829 ASSERT(PAGE_LOCKED(pp));
3829 3830
3830 3831 page_struct_lock(pp);
3831 3832 /*
3832 3833 * Acquire the "freemem_lock" for availrmem.
3833 3834 */
3834 3835 if (cow) {
3835 3836 mutex_enter(&freemem_lock);
3836 3837 if ((availrmem > pages_pp_maximum) &&
3837 3838 (pp->p_cowcnt < (ushort_t)PAGE_LOCK_MAXIMUM)) {
3838 3839 availrmem--;
3839 3840 pages_locked++;
3840 3841 mutex_exit(&freemem_lock);
3841 3842 r = 1;
3842 3843 if (++pp->p_cowcnt == (ushort_t)PAGE_LOCK_MAXIMUM) {
3843 3844 cmn_err(CE_WARN,
3844 3845 "COW lock limit reached on pfn 0x%lx",
3845 3846 page_pptonum(pp));
3846 3847 }
3847 3848 } else
3848 3849 mutex_exit(&freemem_lock);
3849 3850 } else {
3850 3851 if (pp->p_lckcnt) {
3851 3852 if (pp->p_lckcnt < (ushort_t)PAGE_LOCK_MAXIMUM) {
3852 3853 r = 1;
3853 3854 if (++pp->p_lckcnt ==
3854 3855 (ushort_t)PAGE_LOCK_MAXIMUM) {
3855 3856 cmn_err(CE_WARN, "Page lock limit "
3856 3857 "reached on pfn 0x%lx",
3857 3858 page_pptonum(pp));
3858 3859 }
3859 3860 }
3860 3861 } else {
3861 3862 if (kernel) {
3862 3863 /* availrmem accounting done by caller */
3863 3864 ++pp->p_lckcnt;
3864 3865 r = 1;
3865 3866 } else {
3866 3867 mutex_enter(&freemem_lock);
3867 3868 if (availrmem > pages_pp_maximum) {
3868 3869 availrmem--;
3869 3870 pages_locked++;
3870 3871 ++pp->p_lckcnt;
3871 3872 r = 1;
3872 3873 }
3873 3874 mutex_exit(&freemem_lock);
3874 3875 }
3875 3876 }
3876 3877 }
3877 3878 page_struct_unlock(pp);
3878 3879 return (r);
3879 3880 }
3880 3881
3881 3882 /*
3882 3883 * Decommit a lock on a physical page frame. Account for cow locks if
3883 3884 * appropriate.
3884 3885 */
3885 3886 void
3886 3887 page_pp_unlock(
3887 3888 page_t *pp, /* page to be unlocked */
3888 3889 int cow, /* expect cow lock */
3889 3890 int kernel) /* this was a kernel lock */
3890 3891 {
3891 3892 ASSERT(PAGE_LOCKED(pp));
3892 3893
3893 3894 page_struct_lock(pp);
3894 3895 /*
3895 3896 * Acquire the "freemem_lock" for availrmem.
3896 3897	 * If cowcnt or lckcnt is already 0 do nothing; i.e., we
3897 3898 * could be called to unlock even if nothing is locked. This could
3898 3899 * happen if locked file pages were truncated (removing the lock)
3899 3900 * and the file was grown again and new pages faulted in; the new
3900 3901 * pages are unlocked but the segment still thinks they're locked.
3901 3902 */
3902 3903 if (cow) {
3903 3904 if (pp->p_cowcnt) {
3904 3905 mutex_enter(&freemem_lock);
3905 3906 pp->p_cowcnt--;
3906 3907 availrmem++;
3907 3908 pages_locked--;
3908 3909 mutex_exit(&freemem_lock);
3909 3910 }
3910 3911 } else {
3911 3912 if (pp->p_lckcnt && --pp->p_lckcnt == 0) {
3912 3913 if (!kernel) {
3913 3914 mutex_enter(&freemem_lock);
3914 3915 availrmem++;
3915 3916 pages_locked--;
3916 3917 mutex_exit(&freemem_lock);
3917 3918 }
3918 3919 }
3919 3920 }
3920 3921 page_struct_unlock(pp);
3921 3922 }
3922 3923
3923 3924 /*
3924 3925 * This routine reserves availrmem for npages;
3925 3926 * flags: KM_NOSLEEP or KM_SLEEP
3926 3927 * returns 1 on success or 0 on failure
3927 3928 */
3928 3929 int
3929 3930 page_resv(pgcnt_t npages, uint_t flags)
3930 3931 {
3931 3932 mutex_enter(&freemem_lock);
3932 3933 while (availrmem < tune.t_minarmem + npages) {
3933 3934 if (flags & KM_NOSLEEP) {
3934 3935 mutex_exit(&freemem_lock);
3935 3936 return (0);
3936 3937 }
3937 3938 mutex_exit(&freemem_lock);
3938 3939 page_needfree(npages);
3939 3940 kmem_reap();
3940 3941 delay(hz >> 2);
3941 3942 page_needfree(-(spgcnt_t)npages);
3942 3943 mutex_enter(&freemem_lock);
3943 3944 }
3944 3945 availrmem -= npages;
3945 3946 mutex_exit(&freemem_lock);
3946 3947 return (1);
3947 3948 }
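
page_resv() follows a reserve-or-wait pattern: a KM_NOSLEEP caller fails immediately when availrmem is short, while a KM_SLEEP caller nudges reclaim, backs off, and retries. A minimal user-space sketch of the same pattern follows; the counters, function names, and polling interval are invented for illustration only.

#include <pthread.h>
#include <unistd.h>

static pthread_mutex_t avail_lock = PTHREAD_MUTEX_INITIALIZER;
static long avail = 1000;		/* stands in for availrmem */
static const long avail_floor = 100;	/* stands in for tune.t_minarmem */

/* Hypothetical reclaim hook, in the spirit of page_needfree()/kmem_reap(). */
static void
try_reclaim(void)
{
}

/* Reserve n units; a no-sleep caller fails instead of waiting. */
int
resv_try(long n, int nosleep)
{
	(void) pthread_mutex_lock(&avail_lock);
	while (avail < avail_floor + n) {
		if (nosleep) {
			(void) pthread_mutex_unlock(&avail_lock);
			return (0);		/* shortfall; caller must cope */
		}
		(void) pthread_mutex_unlock(&avail_lock);
		try_reclaim();			/* ask for the resource back */
		(void) usleep(250000);		/* back off, like delay(hz >> 2) */
		(void) pthread_mutex_lock(&avail_lock);
	}
	avail -= n;				/* commit the reservation */
	(void) pthread_mutex_unlock(&avail_lock);
	return (1);
}

/* Undo a successful reservation, analogous to page_unresv(). */
void
resv_undo(long n)
{
	(void) pthread_mutex_lock(&avail_lock);
	avail += n;
	(void) pthread_mutex_unlock(&avail_lock);
}
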
3948 3949
3949 3950 /*
3950 3951 * This routine unreserves availrmem for npages;
3951 3952 */
3952 3953 void
3953 3954 page_unresv(pgcnt_t npages)
3954 3955 {
3955 3956 mutex_enter(&freemem_lock);
3956 3957 availrmem += npages;
3957 3958 mutex_exit(&freemem_lock);
3958 3959 }
3959 3960
3960 3961 /*
3961 3962 * See Statement at the beginning of segvn_lockop() regarding
3962 3963 * the way we handle cowcnts and lckcnts.
3963 3964 *
3964 3965 * Transfer cowcnt on 'opp' to cowcnt on 'npp' if the vpage
3965 3966 * that breaks COW has PROT_WRITE.
3966 3967 *
3967 3968 * Note that, we may also break COW in case we are softlocking
3968 3969 * on read access during physio;
3969 3970 * in this softlock case, the vpage may not have PROT_WRITE.
3970 3971 * So, we need to transfer lckcnt on 'opp' to lckcnt on 'npp'
3971 3972 * if the vpage doesn't have PROT_WRITE.
3972 3973 *
3973 3974 * This routine is never called if we are stealing a page
3974 3975 * in anon_private.
3975 3976 *
3976 3977 * The caller subtracted from availrmem for read only mapping.
3977 3978 * if lckcnt is 1 increment availrmem.
3978 3979 */
3979 3980 void
3980 3981 page_pp_useclaim(
3981 3982 page_t *opp, /* original page frame losing lock */
3982 3983 page_t *npp, /* new page frame gaining lock */
3983 3984 uint_t write_perm) /* set if vpage has PROT_WRITE */
3984 3985 {
3985 3986 int payback = 0;
3986 3987 int nidx, oidx;
3987 3988
3988 3989 ASSERT(PAGE_LOCKED(opp));
3989 3990 ASSERT(PAGE_LOCKED(npp));
3990 3991
3991 3992 /*
3992 3993 * Since we have two pages we probably have two locks. We need to take
3993 3994 * them in a defined order to avoid deadlocks. It's also possible they
3994 3995 * both hash to the same lock in which case this is a non-issue.
3995 3996 */
3996 3997 nidx = PAGE_LLOCK_HASH(PP_PAGEROOT(npp));
3997 3998 oidx = PAGE_LLOCK_HASH(PP_PAGEROOT(opp));
3998 3999 if (nidx < oidx) {
3999 4000 page_struct_lock(npp);
4000 4001 page_struct_lock(opp);
4001 4002 } else if (oidx < nidx) {
4002 4003 page_struct_lock(opp);
4003 4004 page_struct_lock(npp);
4004 4005 } else { /* The pages hash to the same lock */
4005 4006 page_struct_lock(npp);
4006 4007 }
4007 4008
4008 4009 ASSERT(npp->p_cowcnt == 0);
4009 4010 ASSERT(npp->p_lckcnt == 0);
4010 4011
4011 4012 /* Don't use claim if nothing is locked (see page_pp_unlock above) */
4012 4013 if ((write_perm && opp->p_cowcnt != 0) ||
4013 4014 (!write_perm && opp->p_lckcnt != 0)) {
4014 4015
4015 4016 if (write_perm) {
4016 4017 npp->p_cowcnt++;
4017 4018 ASSERT(opp->p_cowcnt != 0);
4018 4019 opp->p_cowcnt--;
4019 4020 } else {
4020 4021
4021 4022 ASSERT(opp->p_lckcnt != 0);
4022 4023
4023 4024 /*
4024 4025 * We didn't need availrmem decremented if p_lckcnt on
4025 4026 * original page is 1. Here, we are unlocking
4026 4027 * read-only copy belonging to original page and
4027 4028			 * a read-only copy belonging to the original page and
4028 4029			 * are locking a copy belonging to the new page.
4029 4030 if (opp->p_lckcnt == 1)
4030 4031 payback = 1;
4031 4032
4032 4033 npp->p_lckcnt++;
4033 4034 opp->p_lckcnt--;
4034 4035 }
4035 4036 }
4036 4037 if (payback) {
4037 4038 mutex_enter(&freemem_lock);
4038 4039 availrmem++;
4039 4040 pages_useclaim--;
4040 4041 mutex_exit(&freemem_lock);
4041 4042 }
4042 4043
4043 4044 if (nidx < oidx) {
4044 4045 page_struct_unlock(opp);
4045 4046 page_struct_unlock(npp);
4046 4047 } else if (oidx < nidx) {
4047 4048 page_struct_unlock(npp);
4048 4049 page_struct_unlock(opp);
4049 4050 } else { /* The pages hash to the same lock */
4050 4051 page_struct_unlock(npp);
4051 4052 }
4052 4053 }
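
The index comparison at the top and bottom of page_pp_useclaim() is the standard way to take two locks drawn from a shared hash of mutexes without deadlocking: always acquire them in a fixed (index) order, and take the lock only once when both objects hash to the same mutex. A stand-alone sketch of that discipline follows; the table size, hash, and function names are hypothetical.

#include <pthread.h>
#include <stdint.h>

#define	NLOCKS	64

static pthread_mutex_t locks[NLOCKS];

/* Call once at startup before lock_pair()/unlock_pair() are used. */
void
locks_init(void)
{
	int i;

	for (i = 0; i < NLOCKS; i++)
		(void) pthread_mutex_init(&locks[i], NULL);
}

/* Hypothetical hash from an object address to a slot in the lock table. */
static unsigned
lock_index(const void *obj)
{
	return ((unsigned)(((uintptr_t)obj >> 6) % NLOCKS));
}

/* Acquire the locks covering a and b in a fixed (index) order. */
void
lock_pair(const void *a, const void *b)
{
	unsigned ia = lock_index(a), ib = lock_index(b);

	if (ia < ib) {
		(void) pthread_mutex_lock(&locks[ia]);
		(void) pthread_mutex_lock(&locks[ib]);
	} else if (ib < ia) {
		(void) pthread_mutex_lock(&locks[ib]);
		(void) pthread_mutex_lock(&locks[ia]);
	} else {
		(void) pthread_mutex_lock(&locks[ia]);	/* same lock: take once */
	}
}

void
unlock_pair(const void *a, const void *b)
{
	unsigned ia = lock_index(a), ib = lock_index(b);

	(void) pthread_mutex_unlock(&locks[ia]);
	if (ib != ia)
		(void) pthread_mutex_unlock(&locks[ib]);
}
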
4053 4054
4054 4055 /*
4055 4056 * Simple claim adjust functions -- used to support changes in
4056 4057 * claims due to changes in access permissions. Used by segvn_setprot().
4057 4058 */
4058 4059 int
4059 4060 page_addclaim(page_t *pp)
4060 4061 {
4061 4062 int r = 0; /* result */
4062 4063
4063 4064 ASSERT(PAGE_LOCKED(pp));
4064 4065
4065 4066 page_struct_lock(pp);
4066 4067 ASSERT(pp->p_lckcnt != 0);
4067 4068
4068 4069 if (pp->p_lckcnt == 1) {
4069 4070 if (pp->p_cowcnt < (ushort_t)PAGE_LOCK_MAXIMUM) {
4070 4071 --pp->p_lckcnt;
4071 4072 r = 1;
4072 4073 if (++pp->p_cowcnt == (ushort_t)PAGE_LOCK_MAXIMUM) {
4073 4074 cmn_err(CE_WARN,
4074 4075 "COW lock limit reached on pfn 0x%lx",
4075 4076 page_pptonum(pp));
4076 4077 }
4077 4078 }
4078 4079 } else {
4079 4080 mutex_enter(&freemem_lock);
4080 4081 if ((availrmem > pages_pp_maximum) &&
4081 4082 (pp->p_cowcnt < (ushort_t)PAGE_LOCK_MAXIMUM)) {
4082 4083 --availrmem;
4083 4084 ++pages_claimed;
4084 4085 mutex_exit(&freemem_lock);
4085 4086 --pp->p_lckcnt;
4086 4087 r = 1;
4087 4088 if (++pp->p_cowcnt == (ushort_t)PAGE_LOCK_MAXIMUM) {
4088 4089 cmn_err(CE_WARN,
4089 4090 "COW lock limit reached on pfn 0x%lx",
4090 4091 page_pptonum(pp));
4091 4092 }
4092 4093 } else
4093 4094 mutex_exit(&freemem_lock);
4094 4095 }
4095 4096 page_struct_unlock(pp);
4096 4097 return (r);
4097 4098 }
4098 4099
4099 4100 int
4100 4101 page_subclaim(page_t *pp)
4101 4102 {
4102 4103 int r = 0;
4103 4104
4104 4105 ASSERT(PAGE_LOCKED(pp));
4105 4106
4106 4107 page_struct_lock(pp);
4107 4108 ASSERT(pp->p_cowcnt != 0);
4108 4109
4109 4110 if (pp->p_lckcnt) {
4110 4111 if (pp->p_lckcnt < (ushort_t)PAGE_LOCK_MAXIMUM) {
4111 4112 r = 1;
4112 4113 /*
4113 4114 * for availrmem
4114 4115 */
4115 4116 mutex_enter(&freemem_lock);
4116 4117 availrmem++;
4117 4118 pages_claimed--;
4118 4119 mutex_exit(&freemem_lock);
4119 4120
4120 4121 pp->p_cowcnt--;
4121 4122
4122 4123 if (++pp->p_lckcnt == (ushort_t)PAGE_LOCK_MAXIMUM) {
4123 4124 cmn_err(CE_WARN,
4124 4125 "Page lock limit reached on pfn 0x%lx",
4125 4126 page_pptonum(pp));
4126 4127 }
4127 4128 }
4128 4129 } else {
4129 4130 r = 1;
4130 4131 pp->p_cowcnt--;
4131 4132 pp->p_lckcnt++;
4132 4133 }
4133 4134 page_struct_unlock(pp);
4134 4135 return (r);
4135 4136 }
4136 4137
4137 4138 /*
4138 4139 * Variant of page_addclaim(), where ppa[] contains the pages of a single large
4139 4140 * page.
4140 4141 */
4141 4142 int
4142 4143 page_addclaim_pages(page_t **ppa)
4143 4144 {
4144 4145 pgcnt_t lckpgs = 0, pg_idx;
4145 4146
4146 4147 VM_STAT_ADD(pagecnt.pc_addclaim_pages);
4147 4148
4148 4149 /*
4149 4150 * Only need to take the page struct lock on the large page root.
4150 4151 */
4151 4152 page_struct_lock(ppa[0]);
4152 4153 for (pg_idx = 0; ppa[pg_idx] != NULL; pg_idx++) {
4153 4154
4154 4155 ASSERT(PAGE_LOCKED(ppa[pg_idx]));
4155 4156 ASSERT(ppa[pg_idx]->p_lckcnt != 0);
4156 4157 if (ppa[pg_idx]->p_cowcnt == (ushort_t)PAGE_LOCK_MAXIMUM) {
4157 4158 page_struct_unlock(ppa[0]);
4158 4159 return (0);
4159 4160 }
4160 4161 if (ppa[pg_idx]->p_lckcnt > 1)
4161 4162 lckpgs++;
4162 4163 }
4163 4164
4164 4165 if (lckpgs != 0) {
4165 4166 mutex_enter(&freemem_lock);
4166 4167 if (availrmem >= pages_pp_maximum + lckpgs) {
4167 4168 availrmem -= lckpgs;
4168 4169 pages_claimed += lckpgs;
4169 4170 } else {
4170 4171 mutex_exit(&freemem_lock);
4171 4172 page_struct_unlock(ppa[0]);
4172 4173 return (0);
4173 4174 }
4174 4175 mutex_exit(&freemem_lock);
4175 4176 }
4176 4177
4177 4178 for (pg_idx = 0; ppa[pg_idx] != NULL; pg_idx++) {
4178 4179 ppa[pg_idx]->p_lckcnt--;
4179 4180 ppa[pg_idx]->p_cowcnt++;
4180 4181 }
4181 4182 page_struct_unlock(ppa[0]);
4182 4183 return (1);
4183 4184 }
4184 4185
4185 4186 /*
4186 4187 * Variant of page_subclaim(), where ppa[] contains the pages of a single large
4187 4188 * page.
4188 4189 */
4189 4190 int
4190 4191 page_subclaim_pages(page_t **ppa)
4191 4192 {
4192 4193 pgcnt_t ulckpgs = 0, pg_idx;
4193 4194
4194 4195 VM_STAT_ADD(pagecnt.pc_subclaim_pages);
4195 4196
4196 4197 /*
4197 4198 * Only need to take the page struct lock on the large page root.
4198 4199 */
4199 4200 page_struct_lock(ppa[0]);
4200 4201 for (pg_idx = 0; ppa[pg_idx] != NULL; pg_idx++) {
4201 4202
4202 4203 ASSERT(PAGE_LOCKED(ppa[pg_idx]));
4203 4204 ASSERT(ppa[pg_idx]->p_cowcnt != 0);
4204 4205 if (ppa[pg_idx]->p_lckcnt == (ushort_t)PAGE_LOCK_MAXIMUM) {
4205 4206 page_struct_unlock(ppa[0]);
4206 4207 return (0);
4207 4208 }
4208 4209 if (ppa[pg_idx]->p_lckcnt != 0)
4209 4210 ulckpgs++;
4210 4211 }
4211 4212
4212 4213 if (ulckpgs != 0) {
4213 4214 mutex_enter(&freemem_lock);
4214 4215 availrmem += ulckpgs;
4215 4216 pages_claimed -= ulckpgs;
4216 4217 mutex_exit(&freemem_lock);
4217 4218 }
4218 4219
4219 4220 for (pg_idx = 0; ppa[pg_idx] != NULL; pg_idx++) {
4220 4221 ppa[pg_idx]->p_cowcnt--;
4221 4222 ppa[pg_idx]->p_lckcnt++;
4222 4223
4223 4224 }
4224 4225 page_struct_unlock(ppa[0]);
4225 4226 return (1);
4226 4227 }
4227 4228
4228 4229 page_t *
4229 4230 page_numtopp(pfn_t pfnum, se_t se)
4230 4231 {
4231 4232 page_t *pp;
4232 4233
4233 4234 retry:
4234 4235 pp = page_numtopp_nolock(pfnum);
4235 4236 if (pp == NULL) {
4236 4237 return ((page_t *)NULL);
4237 4238 }
4238 4239
4239 4240 /*
4240 4241 * Acquire the appropriate lock on the page.
4241 4242 */
4242 4243 while (!page_lock(pp, se, (kmutex_t *)NULL, P_RECLAIM)) {
4243 4244 if (page_pptonum(pp) != pfnum)
4244 4245 goto retry;
4245 4246 continue;
4246 4247 }
4247 4248
4248 4249 if (page_pptonum(pp) != pfnum) {
4249 4250 page_unlock(pp);
4250 4251 goto retry;
4251 4252 }
4252 4253
4253 4254 return (pp);
4254 4255 }
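
page_numtopp() shows a lock-then-revalidate idiom: the pfn-to-page binding can change while the caller sleeps on the page lock, so the identity is checked again once the lock is held and the whole lookup is retried on a mismatch. The schematic user-space sketch below demonstrates only the control flow; the slot table, ids, and names are invented.

#include <pthread.h>
#include <stddef.h>

#define	NSLOTS	8

struct slot {
	pthread_mutex_t	lock;		/* initialized at startup */
	int		id;		/* identity currently bound to the slot */
};

static struct slot slots[NSLOTS];

/* Unlocked lookup, in the role of page_numtopp_nolock(). */
static struct slot *
slot_lookup_nolock(int id)
{
	return ((id >= 0) ? &slots[id % NSLOTS] : NULL);
}

/* Locked lookup; returns with the slot's lock held, or NULL. */
struct slot *
slot_lookup_locked(int id)
{
	struct slot *sp;

retry:
	sp = slot_lookup_nolock(id);
	if (sp == NULL)
		return (NULL);

	(void) pthread_mutex_lock(&sp->lock);

	/*
	 * Another thread may have rebound the slot while we waited for
	 * the lock; drop it and redo the lookup, as page_numtopp() does
	 * when the pfn no longer matches.
	 */
	if (sp->id != id) {
		(void) pthread_mutex_unlock(&sp->lock);
		goto retry;
	}
	return (sp);
}
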
4255 4256
4256 4257 page_t *
4257 4258 page_numtopp_noreclaim(pfn_t pfnum, se_t se)
4258 4259 {
4259 4260 page_t *pp;
4260 4261
4261 4262 retry:
4262 4263 pp = page_numtopp_nolock(pfnum);
4263 4264 if (pp == NULL) {
4264 4265 return ((page_t *)NULL);
4265 4266 }
4266 4267
4267 4268 /*
4268 4269 * Acquire the appropriate lock on the page.
4269 4270 */
4270 4271 while (!page_lock(pp, se, (kmutex_t *)NULL, P_NO_RECLAIM)) {
4271 4272 if (page_pptonum(pp) != pfnum)
4272 4273 goto retry;
4273 4274 continue;
4274 4275 }
4275 4276
4276 4277 if (page_pptonum(pp) != pfnum) {
4277 4278 page_unlock(pp);
4278 4279 goto retry;
4279 4280 }
4280 4281
4281 4282 return (pp);
4282 4283 }
4283 4284
4284 4285 /*
4285 4286 * This routine is like page_numtopp, but will only return page structs
4286 4287 * for pages which are ok for loading into hardware using the page struct.
4287 4288 */
4288 4289 page_t *
4289 4290 page_numtopp_nowait(pfn_t pfnum, se_t se)
4290 4291 {
4291 4292 page_t *pp;
4292 4293
4293 4294 retry:
4294 4295 pp = page_numtopp_nolock(pfnum);
4295 4296 if (pp == NULL) {
4296 4297 return ((page_t *)NULL);
4297 4298 }
4298 4299
4299 4300 /*
4300 4301 * Try to acquire the appropriate lock on the page.
4301 4302 */
4302 4303 if (PP_ISFREE(pp))
4303 4304 pp = NULL;
4304 4305 else {
4305 4306 if (!page_trylock(pp, se))
4306 4307 pp = NULL;
4307 4308 else {
4308 4309 if (page_pptonum(pp) != pfnum) {
4309 4310 page_unlock(pp);
4310 4311 goto retry;
4311 4312 }
4312 4313 if (PP_ISFREE(pp)) {
4313 4314 page_unlock(pp);
4314 4315 pp = NULL;
4315 4316 }
4316 4317 }
4317 4318 }
4318 4319 return (pp);
4319 4320 }
4320 4321
4321 4322 /*
4322 4323 * Returns a count of dirty pages that are in the process
4323 4324 * of being written out. If 'cleanit' is set, try to push the page.
4324 4325 */
4325 4326 pgcnt_t
4326 4327 page_busy(int cleanit)
4327 4328 {
4328 4329 page_t *page0 = page_first();
4329 4330 page_t *pp = page0;
4330 4331 pgcnt_t nppbusy = 0;
4331 4332 u_offset_t off;
4332 4333
4333 4334 do {
4334 4335 vnode_t *vp = pp->p_vnode;
4335 4336 /*
4336 4337 * A page is a candidate for syncing if it is:
4337 4338 *
4338 4339 * (a) On neither the freelist nor the cachelist
4339 4340 * (b) Hashed onto a vnode
4340 4341 * (c) Not a kernel page
4341 4342 * (d) Dirty
4342 4343 * (e) Not part of a swapfile
4343 4344	 * (f) a page which belongs to a real vnode; e.g., has a non-null
4344 4345 * v_vfsp pointer.
4345 4346 * (g) Backed by a filesystem which doesn't have a
4346 4347 * stubbed-out sync operation
4347 4348 */
4348 4349 if (!PP_ISFREE(pp) && vp != NULL && !VN_ISKAS(vp) &&
4349 4350 hat_ismod(pp) && !IS_SWAPVP(vp) && vp->v_vfsp != NULL &&
4350 4351 vfs_can_sync(vp->v_vfsp)) {
4351 4352 nppbusy++;
4352 4353
4353 4354 if (!cleanit)
4354 4355 continue;
4355 4356 if (!page_trylock(pp, SE_EXCL))
4356 4357 continue;
4357 4358
4358 4359 if (PP_ISFREE(pp) || vp == NULL || IS_SWAPVP(vp) ||
4359 4360 pp->p_lckcnt != 0 || pp->p_cowcnt != 0 ||
4360 4361 !(hat_pagesync(pp,
4361 4362 HAT_SYNC_DONTZERO | HAT_SYNC_STOPON_MOD) & P_MOD)) {
4362 4363 page_unlock(pp);
4363 4364 continue;
4364 4365 }
4365 4366 off = pp->p_offset;
4366 4367 VN_HOLD(vp);
4367 4368 page_unlock(pp);
4368 4369 (void) VOP_PUTPAGE(vp, off, PAGESIZE,
4369 4370 B_ASYNC | B_FREE, kcred, NULL);
4370 4371 VN_RELE(vp);
4371 4372 }
4372 4373 } while ((pp = page_next(pp)) != page0);
4373 4374
4374 4375 return (nppbusy);
4375 4376 }
4376 4377
4377 4378 void page_invalidate_pages(void);
4378 4379
4379 4380 /*
4380 4381 * callback handler to vm sub-system
4381 4382 *
4382 4383 * callers make sure no recursive entries to this func.
4383 4384 */
4384 4385 /*ARGSUSED*/
4385 4386 boolean_t
4386 4387 callb_vm_cpr(void *arg, int code)
4387 4388 {
4388 4389 if (code == CB_CODE_CPR_CHKPT)
4389 4390 page_invalidate_pages();
4390 4391 return (B_TRUE);
4391 4392 }
4392 4393
4393 4394 /*
4394 4395 * Invalidate all pages of the system.
4395 4396	 * It shouldn't be called until all user page activity has stopped.
4396 4397 */
4397 4398 void
4398 4399 page_invalidate_pages()
4399 4400 {
4400 4401 page_t *pp;
4401 4402 page_t *page0;
4402 4403 pgcnt_t nbusypages;
4403 4404 int retry = 0;
4404 4405 const int MAXRETRIES = 4;
4405 4406 top:
4406 4407 /*
4407 4408 * Flush dirty pages and destroy the clean ones.
4408 4409 */
4409 4410 nbusypages = 0;
4410 4411
4411 4412 pp = page0 = page_first();
4412 4413 do {
4413 4414 struct vnode *vp;
4414 4415 u_offset_t offset;
4415 4416 int mod;
4416 4417
4417 4418 /*
4418 4419		 * skip the page if it has no vnode, or if it is associated
4419 4420		 * with the kernel vnode or prom-allocated kernel memory.
4420 4421 */
4421 4422 if ((vp = pp->p_vnode) == NULL || VN_ISKAS(vp))
4422 4423 continue;
4423 4424
4424 4425 /*
4425 4426		 * skip the page if it has already been freed and invalidated.
4426 4427 */
4427 4428 if (PP_ISFREE(pp) && PP_ISAGED(pp))
4428 4429 continue;
4429 4430
4430 4431 /*
4431 4432 * skip pages that are already locked or can't be "exclusively"
4432 4433 * locked or are already free. After we lock the page, check
4433 4434 * the free and age bits again to be sure it's not destroyed
4434 4435 * yet.
4435 4436 * To achieve max. parallelization, we use page_trylock instead
4436 4437		 * of page_lock so that we don't get blocked on individual pages
4437 4438 * while we have thousands of other pages to process.
4438 4439 */
4439 4440 if (!page_trylock(pp, SE_EXCL)) {
4440 4441 nbusypages++;
4441 4442 continue;
4442 4443 } else if (PP_ISFREE(pp)) {
4443 4444 if (!PP_ISAGED(pp)) {
4444 4445 page_destroy_free(pp);
4445 4446 } else {
4446 4447 page_unlock(pp);
4447 4448 }
4448 4449 continue;
4449 4450 }
4450 4451 /*
4451 4452 * Is this page involved in some I/O? shared?
4452 4453 *
4453 4454 * The page_struct_lock need not be acquired to
4454 4455 * examine these fields since the page has an
4455 4456 * "exclusive" lock.
4456 4457 */
4457 4458 if (pp->p_lckcnt != 0 || pp->p_cowcnt != 0) {
4458 4459 page_unlock(pp);
4459 4460 continue;
4460 4461 }
4461 4462
4462 4463 if (vp->v_type == VCHR) {
4463 4464 panic("vp->v_type == VCHR");
4464 4465 /*NOTREACHED*/
4465 4466 }
4466 4467
4467 4468 if (!page_try_demote_pages(pp)) {
4468 4469 page_unlock(pp);
4469 4470 continue;
4470 4471 }
4471 4472
4472 4473 /*
4473 4474 * Check the modified bit. Leave the bits alone in hardware
4474 4475 * (they will be modified if we do the putpage).
4475 4476 */
4476 4477 mod = (hat_pagesync(pp, HAT_SYNC_DONTZERO | HAT_SYNC_STOPON_MOD)
4477 4478 & P_MOD);
4478 4479 if (mod) {
4479 4480 offset = pp->p_offset;
4480 4481 /*
4481 4482 * Hold the vnode before releasing the page lock
4482 4483 * to prevent it from being freed and re-used by
4483 4484 * some other thread.
4484 4485 */
4485 4486 VN_HOLD(vp);
4486 4487 page_unlock(pp);
4487 4488 /*
4488 4489 * No error return is checked here. Callers such as
4489 4490			 * cpr deal with the dirty pages at dump time
4490 4491 * if this putpage fails.
4491 4492 */
4492 4493 (void) VOP_PUTPAGE(vp, offset, PAGESIZE, B_INVAL,
4493 4494 kcred, NULL);
4494 4495 VN_RELE(vp);
4495 4496 } else {
4496 4497 /*LINTED: constant in conditional context*/
4497 4498 VN_DISPOSE(pp, B_INVAL, 0, kcred);
4498 4499 }
4499 4500 } while ((pp = page_next(pp)) != page0);
4500 4501 if (nbusypages && retry++ < MAXRETRIES) {
4501 4502 delay(1);
4502 4503 goto top;
4503 4504 }
4504 4505 }
4505 4506
4506 4507 /*
4507 4508 * Replace the page "old" with the page "new" on the page hash and vnode lists
4508 4509 *
4509 4510 * the replacement must be done in place, ie the equivalent sequence:
4510 4511 *
4511 4512 * vp = old->p_vnode;
4512 4513 * off = old->p_offset;
4513 4514 * page_do_hashout(old)
4514 4515 * page_do_hashin(new, vp, off)
4515 4516 *
4516 4517 * doesn't work, since
4517 4518 * 1) if old is the only page on the vnode, the v_pages list has a window
4518 4519 * where it looks empty. This will break file system assumptions.
4519 4520 * and
4520 4521 * 2) pvn_vplist_dirty() can't deal with pages moving on the v_pages list.
4521 4522 */
4522 4523 static void
4523 4524 page_do_relocate_hash(page_t *new, page_t *old)
4524 4525 {
4525 4526 page_t **hash_list;
4526 4527 vnode_t *vp = old->p_vnode;
4527 4528 kmutex_t *sep;
4528 4529
4529 4530 ASSERT(PAGE_EXCL(old));
4530 4531 ASSERT(PAGE_EXCL(new));
4531 4532 ASSERT(vp != NULL);
4532 4533 ASSERT(MUTEX_HELD(page_vnode_mutex(vp)));
4533 4534 ASSERT(MUTEX_HELD(PAGE_HASH_MUTEX(PAGE_HASH_FUNC(vp, old->p_offset))));
4534 4535
4535 4536 /*
4536 4537 * First find old page on the page hash list
4537 4538 */
4538 4539 hash_list = &page_hash[PAGE_HASH_FUNC(vp, old->p_offset)];
4539 4540
4540 4541 for (;;) {
4541 4542 if (*hash_list == old)
4542 4543 break;
4543 4544 if (*hash_list == NULL) {
4544 4545 panic("page_do_hashout");
4545 4546 /*NOTREACHED*/
4546 4547 }
4547 4548 hash_list = &(*hash_list)->p_hash;
4548 4549 }
4549 4550
4550 4551 /*
4551 4552 * update new and replace old with new on the page hash list
4552 4553 */
4553 4554 new->p_vnode = old->p_vnode;
4554 4555 new->p_offset = old->p_offset;
4555 4556 new->p_hash = old->p_hash;
4556 4557 *hash_list = new;
4557 4558
4558 4559 if ((new->p_vnode->v_flag & VISSWAP) != 0)
4559 4560 PP_SETSWAP(new);
4560 4561
4561 4562 /*
4562 4563 * replace old with new on the vnode's page list
4563 4564 */
4564 4565 if (old->p_vpnext == old) {
4565 4566 new->p_vpnext = new;
4566 4567 new->p_vpprev = new;
4567 4568 } else {
4568 4569 new->p_vpnext = old->p_vpnext;
4569 4570 new->p_vpprev = old->p_vpprev;
4570 4571 new->p_vpnext->p_vpprev = new;
4571 4572 new->p_vpprev->p_vpnext = new;
4572 4573 }
4573 4574 if (vp->v_pages == old)
4574 4575 vp->v_pages = new;
4575 4576
4576 4577 /*
4577 4578 * clear out the old page
4578 4579 */
4579 4580 old->p_hash = NULL;
4580 4581 old->p_vpnext = NULL;
4581 4582 old->p_vpprev = NULL;
4582 4583 old->p_vnode = NULL;
4583 4584 PP_CLRSWAP(old);
4584 4585 old->p_offset = (u_offset_t)-1;
4585 4586 page_clr_all_props(old);
4586 4587
4587 4588 /*
4588 4589 * Wake up processes waiting for this page. The page's
4589 4590 * identity has been changed, and is probably not the
4590 4591 * desired page any longer.
4591 4592 */
4592 4593 sep = page_se_mutex(old);
4593 4594 mutex_enter(sep);
4594 4595 old->p_selock &= ~SE_EWANTED;
4595 4596 if (CV_HAS_WAITERS(&old->p_cv))
4596 4597 cv_broadcast(&old->p_cv);
4597 4598 mutex_exit(sep);
4598 4599 }
4599 4600
4600 4601 /*
4601 4602 * This function moves the identity of page "pp_old" to page "pp_new".
4602 4603 * Both pages must be locked on entry. "pp_new" is free, has no identity,
4603 4604 * and need not be hashed out from anywhere.
4604 4605 */
4605 4606 void
4606 4607 page_relocate_hash(page_t *pp_new, page_t *pp_old)
4607 4608 {
4608 4609 vnode_t *vp = pp_old->p_vnode;
4609 4610 u_offset_t off = pp_old->p_offset;
4610 4611 kmutex_t *phm, *vphm;
4611 4612
4612 4613 /*
4613 4614 * Rehash two pages
4614 4615 */
4615 4616 ASSERT(PAGE_EXCL(pp_old));
4616 4617 ASSERT(PAGE_EXCL(pp_new));
4617 4618 ASSERT(vp != NULL);
4618 4619 ASSERT(pp_new->p_vnode == NULL);
4619 4620
4620 4621 /*
4621 4622 * hashout then hashin while holding the mutexes
4622 4623 */
4623 4624 phm = PAGE_HASH_MUTEX(PAGE_HASH_FUNC(vp, off));
4624 4625 mutex_enter(phm);
4625 4626 vphm = page_vnode_mutex(vp);
4626 4627 mutex_enter(vphm);
4627 4628
4628 4629 page_do_relocate_hash(pp_new, pp_old);
4629 4630
4630 4631 /* The following comment preserved from page_flip(). */
4631 4632 pp_new->p_fsdata = pp_old->p_fsdata;
4632 4633 pp_old->p_fsdata = 0;
4633 4634 mutex_exit(vphm);
4634 4635 mutex_exit(phm);
4635 4636
4636 4637 /*
4637 4638 * The page_struct_lock need not be acquired for lckcnt and
4638 4639 * cowcnt since the page has an "exclusive" lock.
4639 4640 */
4640 4641 ASSERT(pp_new->p_lckcnt == 0);
4641 4642 ASSERT(pp_new->p_cowcnt == 0);
4642 4643 pp_new->p_lckcnt = pp_old->p_lckcnt;
4643 4644 pp_new->p_cowcnt = pp_old->p_cowcnt;
4644 4645 pp_old->p_lckcnt = pp_old->p_cowcnt = 0;
4645 4646
4646 4647 }
4647 4648
4648 4649 /*
4649 4650 * Helper routine used to lock all remaining members of a
4650 4651 * large page. The caller is responsible for passing in a locked
4651 4652 * pp. If pp is a large page, then it succeeds in locking all the
4652 4653 * remaining constituent pages or it returns with only the
4653 4654 * original page locked.
4654 4655 *
4655 4656 * Returns 1 on success, 0 on failure.
4656 4657 *
4657 4658 * If success is returned this routine guarantees p_szc for all constituent
4658 4659 * pages of a large page pp belongs to can't change. To achieve this we
4659 4660 * recheck szc of pp after locking all constituent pages and retry if szc
4660 4661 * changed (it could only decrease). Since hat_page_demote() needs an EXCL
4661 4662 * lock on one of constituent pages it can't be running after all constituent
4662 4663 * pages are locked. hat_page_demote() with a lock on a constituent page
4663 4664 * outside of this large page (i.e. pp belonged to a larger large page) is
4664 4665 * already done with all constituent pages of pp since the root's p_szc is
4665 4666 * changed last. Therefore no need to synchronize with hat_page_demote() that
4666 4667 * locked a constituent page outside of pp's current large page.
4667 4668 */
4668 4669 #ifdef DEBUG
4669 4670 uint32_t gpg_trylock_mtbf = 0;
4670 4671 #endif
4671 4672
4672 4673 int
4673 4674 group_page_trylock(page_t *pp, se_t se)
4674 4675 {
4675 4676 page_t *tpp;
4676 4677 pgcnt_t npgs, i, j;
4677 4678 uint_t pszc = pp->p_szc;
4678 4679
4679 4680 #ifdef DEBUG
4680 4681 if (gpg_trylock_mtbf && !(gethrtime() % gpg_trylock_mtbf)) {
4681 4682 return (0);
4682 4683 }
4683 4684 #endif
4684 4685
4685 4686 if (pp != PP_GROUPLEADER(pp, pszc)) {
4686 4687 return (0);
4687 4688 }
4688 4689
4689 4690 retry:
4690 4691 ASSERT(PAGE_LOCKED_SE(pp, se));
4691 4692 ASSERT(!PP_ISFREE(pp));
4692 4693 if (pszc == 0) {
4693 4694 return (1);
4694 4695 }
4695 4696 npgs = page_get_pagecnt(pszc);
4696 4697 tpp = pp + 1;
4697 4698 for (i = 1; i < npgs; i++, tpp++) {
4698 4699 if (!page_trylock(tpp, se)) {
4699 4700 tpp = pp + 1;
4700 4701 for (j = 1; j < i; j++, tpp++) {
4701 4702 page_unlock(tpp);
4702 4703 }
4703 4704 return (0);
4704 4705 }
4705 4706 }
4706 4707 if (pp->p_szc != pszc) {
4707 4708 ASSERT(pp->p_szc < pszc);
4708 4709 ASSERT(pp->p_vnode != NULL && !PP_ISKAS(pp) &&
4709 4710 !IS_SWAPFSVP(pp->p_vnode));
4710 4711 tpp = pp + 1;
4711 4712 for (i = 1; i < npgs; i++, tpp++) {
4712 4713 page_unlock(tpp);
4713 4714 }
4714 4715 pszc = pp->p_szc;
4715 4716 goto retry;
4716 4717 }
4717 4718 return (1);
4718 4719 }
4719 4720
4720 4721 void
4721 4722 group_page_unlock(page_t *pp)
4722 4723 {
4723 4724 page_t *tpp;
4724 4725 pgcnt_t npgs, i;
4725 4726
4726 4727 ASSERT(PAGE_LOCKED(pp));
4727 4728 ASSERT(!PP_ISFREE(pp));
4728 4729 ASSERT(pp == PP_PAGEROOT(pp));
4729 4730 npgs = page_get_pagecnt(pp->p_szc);
4730 4731 for (i = 1, tpp = pp + 1; i < npgs; i++, tpp++) {
4731 4732 page_unlock(tpp);
4732 4733 }
4733 4734 }
4734 4735
4735 4736 /*
4736 4737 * returns
4737 4738 * 0 : on success and *nrelocp is number of relocated PAGESIZE pages
4738 4739 * ERANGE : this is not a base page
4739 4740 * EBUSY : failure to get locks on the page/pages
4740 4741 * ENOMEM : failure to obtain replacement pages
4741 4742 * EAGAIN : OBP has not yet completed its boot-time handoff to the kernel
4742 4743 * EIO : An error occurred while trying to copy the page data
4743 4744 *
4744 4745 * Return with all constituent members of target and replacement
4745 4746	 * SE_EXCL locked. It is the caller's responsibility to drop the
4746 4747 * locks.
4747 4748 */
4748 4749 int
4749 4750 do_page_relocate(
4750 4751 page_t **target,
4751 4752 page_t **replacement,
4752 4753 int grouplock,
4753 4754 spgcnt_t *nrelocp,
4754 4755 lgrp_t *lgrp)
4755 4756 {
4756 4757 page_t *first_repl;
4757 4758 page_t *repl;
4758 4759 page_t *targ;
4759 4760 page_t *pl = NULL;
4760 4761 uint_t ppattr;
4761 4762 pfn_t pfn, repl_pfn;
4762 4763 uint_t szc;
4763 4764 spgcnt_t npgs, i;
4764 4765 int repl_contig = 0;
4765 4766 uint_t flags = 0;
4766 4767 spgcnt_t dofree = 0;
4767 4768
4768 4769 *nrelocp = 0;
4769 4770
4770 4771 #if defined(__sparc)
4771 4772 /*
4772 4773 * We need to wait till OBP has completed
4773 4774 * its boot-time handoff of its resources to the kernel
4774 4775 * before we allow page relocation
4775 4776 */
4776 4777 if (page_relocate_ready == 0) {
4777 4778 return (EAGAIN);
4778 4779 }
4779 4780 #endif
4780 4781
4781 4782 /*
4782 4783 * If this is not a base page,
4783 4784 * just return with 0x0 pages relocated.
4784 4785 */
4785 4786 targ = *target;
4786 4787 ASSERT(PAGE_EXCL(targ));
4787 4788 ASSERT(!PP_ISFREE(targ));
4788 4789 szc = targ->p_szc;
4789 4790 ASSERT(szc < mmu_page_sizes);
4790 4791 VM_STAT_ADD(vmm_vmstats.ppr_reloc[szc]);
4791 4792 pfn = targ->p_pagenum;
4792 4793 if (pfn != PFN_BASE(pfn, szc)) {
4793 4794 VM_STAT_ADD(vmm_vmstats.ppr_relocnoroot[szc]);
4794 4795 return (ERANGE);
4795 4796 }
4796 4797
4797 4798 if ((repl = *replacement) != NULL && repl->p_szc >= szc) {
4798 4799 repl_pfn = repl->p_pagenum;
4799 4800 if (repl_pfn != PFN_BASE(repl_pfn, szc)) {
4800 4801 VM_STAT_ADD(vmm_vmstats.ppr_reloc_replnoroot[szc]);
4801 4802 return (ERANGE);
4802 4803 }
4803 4804 repl_contig = 1;
4804 4805 }
4805 4806
4806 4807 /*
4807 4808 * We must lock all members of this large page or we cannot
4808 4809 * relocate any part of it.
4809 4810 */
4810 4811 if (grouplock != 0 && !group_page_trylock(targ, SE_EXCL)) {
4811 4812 VM_STAT_ADD(vmm_vmstats.ppr_relocnolock[targ->p_szc]);
4812 4813 return (EBUSY);
4813 4814 }
4814 4815
4815 4816 /*
4816 4817	 * Reread szc; it could have been decreased before
4817 4818 * group_page_trylock() was done.
4818 4819 */
4819 4820 szc = targ->p_szc;
4820 4821 ASSERT(szc < mmu_page_sizes);
4821 4822 VM_STAT_ADD(vmm_vmstats.ppr_reloc[szc]);
4822 4823 ASSERT(pfn == PFN_BASE(pfn, szc));
4823 4824
4824 4825 npgs = page_get_pagecnt(targ->p_szc);
4825 4826
4826 4827 if (repl == NULL) {
4827 4828 dofree = npgs; /* Size of target page in MMU pages */
4828 4829 if (!page_create_wait(dofree, 0)) {
4829 4830 if (grouplock != 0) {
4830 4831 group_page_unlock(targ);
4831 4832 }
4832 4833 VM_STAT_ADD(vmm_vmstats.ppr_relocnomem[szc]);
4833 4834 return (ENOMEM);
4834 4835 }
4835 4836
4836 4837 /*
4837 4838 * seg kmem pages require that the target and replacement
4838 4839 * page be the same pagesize.
4839 4840 */
4840 4841 flags = (VN_ISKAS(targ->p_vnode)) ? PGR_SAMESZC : 0;
4841 4842 repl = page_get_replacement_page(targ, lgrp, flags);
4842 4843 if (repl == NULL) {
4843 4844 if (grouplock != 0) {
4844 4845 group_page_unlock(targ);
4845 4846 }
4846 4847 page_create_putback(dofree);
4847 4848 VM_STAT_ADD(vmm_vmstats.ppr_relocnomem[szc]);
4848 4849 return (ENOMEM);
4849 4850 }
4850 4851 }
4851 4852 #ifdef DEBUG
4852 4853 else {
4853 4854 ASSERT(PAGE_LOCKED(repl));
4854 4855 }
4855 4856 #endif /* DEBUG */
4856 4857
4857 4858 #if defined(__sparc)
4858 4859 /*
4859 4860	 * Let hat_page_relocate() complete the relocation if it's a kernel page
4860 4861 */
4861 4862 if (VN_ISKAS(targ->p_vnode)) {
4862 4863 *replacement = repl;
4863 4864 if (hat_page_relocate(target, replacement, nrelocp) != 0) {
4864 4865 if (grouplock != 0) {
4865 4866 group_page_unlock(targ);
4866 4867 }
4867 4868 if (dofree) {
4868 4869 *replacement = NULL;
4869 4870 page_free_replacement_page(repl);
4870 4871 page_create_putback(dofree);
4871 4872 }
4872 4873 VM_STAT_ADD(vmm_vmstats.ppr_krelocfail[szc]);
4873 4874 return (EAGAIN);
4874 4875 }
4875 4876 VM_STAT_ADD(vmm_vmstats.ppr_relocok[szc]);
4876 4877 return (0);
4877 4878 }
4878 4879 #else
4879 4880 #if defined(lint)
4880 4881 dofree = dofree;
4881 4882 #endif
4882 4883 #endif
4883 4884
4884 4885 first_repl = repl;
4885 4886
4886 4887 for (i = 0; i < npgs; i++) {
4887 4888 ASSERT(PAGE_EXCL(targ));
4888 4889 ASSERT(targ->p_slckcnt == 0);
4889 4890 ASSERT(repl->p_slckcnt == 0);
4890 4891
4891 4892 (void) hat_pageunload(targ, HAT_FORCE_PGUNLOAD);
4892 4893
4893 4894 ASSERT(hat_page_getshare(targ) == 0);
4894 4895 ASSERT(!PP_ISFREE(targ));
4895 4896 ASSERT(targ->p_pagenum == (pfn + i));
4896 4897 ASSERT(repl_contig == 0 ||
4897 4898 repl->p_pagenum == (repl_pfn + i));
4898 4899
4899 4900 /*
4900 4901 * Copy the page contents and attributes then
4901 4902 * relocate the page in the page hash.
4902 4903 */
4903 4904 if (ppcopy(targ, repl) == 0) {
4904 4905 targ = *target;
4905 4906 repl = first_repl;
4906 4907 VM_STAT_ADD(vmm_vmstats.ppr_copyfail);
4907 4908 if (grouplock != 0) {
4908 4909 group_page_unlock(targ);
4909 4910 }
4910 4911 if (dofree) {
4911 4912 *replacement = NULL;
4912 4913 page_free_replacement_page(repl);
4913 4914 page_create_putback(dofree);
4914 4915 }
4915 4916 return (EIO);
4916 4917 }
4917 4918
4918 4919 targ++;
4919 4920 if (repl_contig != 0) {
4920 4921 repl++;
4921 4922 } else {
4922 4923 repl = repl->p_next;
4923 4924 }
4924 4925 }
4925 4926
4926 4927 repl = first_repl;
4927 4928 targ = *target;
4928 4929
4929 4930 for (i = 0; i < npgs; i++) {
4930 4931 ppattr = hat_page_getattr(targ, (P_MOD | P_REF | P_RO));
4931 4932 page_clr_all_props(repl);
4932 4933 page_set_props(repl, ppattr);
4933 4934 page_relocate_hash(repl, targ);
4934 4935
4935 4936 ASSERT(hat_page_getshare(targ) == 0);
4936 4937 ASSERT(hat_page_getshare(repl) == 0);
4937 4938 /*
4938 4939 * Now clear the props on targ, after the
4939 4940 * page_relocate_hash(), they no longer
4940 4941 * have any meaning.
4941 4942 */
4942 4943 page_clr_all_props(targ);
4943 4944 ASSERT(targ->p_next == targ);
4944 4945 ASSERT(targ->p_prev == targ);
4945 4946 page_list_concat(&pl, &targ);
4946 4947
4947 4948 targ++;
4948 4949 if (repl_contig != 0) {
4949 4950 repl++;
4950 4951 } else {
4951 4952 repl = repl->p_next;
4952 4953 }
4953 4954 }
4954 4955 /* assert that we have come full circle with repl */
4955 4956 ASSERT(repl_contig == 1 || first_repl == repl);
4956 4957
4957 4958 *target = pl;
4958 4959 if (*replacement == NULL) {
4959 4960 ASSERT(first_repl == repl);
4960 4961 *replacement = repl;
4961 4962 }
4962 4963 VM_STAT_ADD(vmm_vmstats.ppr_relocok[szc]);
4963 4964 *nrelocp = npgs;
4964 4965 return (0);
4965 4966 }
4966 4967 /*
4967 4968 * On success returns 0 and *nrelocp the number of PAGESIZE pages relocated.
4968 4969 */
4969 4970 int
4970 4971 page_relocate(
4971 4972 page_t **target,
4972 4973 page_t **replacement,
4973 4974 int grouplock,
4974 4975 int freetarget,
4975 4976 spgcnt_t *nrelocp,
4976 4977 lgrp_t *lgrp)
4977 4978 {
4978 4979 spgcnt_t ret;
4979 4980
4980 4981 /* do_page_relocate returns 0 on success or errno value */
4981 4982 ret = do_page_relocate(target, replacement, grouplock, nrelocp, lgrp);
4982 4983
4983 4984 if (ret != 0 || freetarget == 0) {
4984 4985 return (ret);
4985 4986 }
4986 4987 if (*nrelocp == 1) {
4987 4988 ASSERT(*target != NULL);
4988 4989 page_free(*target, 1);
4989 4990 } else {
4990 4991 page_t *tpp = *target;
4991 4992 uint_t szc = tpp->p_szc;
4992 4993 pgcnt_t npgs = page_get_pagecnt(szc);
4993 4994 ASSERT(npgs > 1);
4994 4995 ASSERT(szc != 0);
4995 4996 do {
4996 4997 ASSERT(PAGE_EXCL(tpp));
4997 4998 ASSERT(!hat_page_is_mapped(tpp));
4998 4999 ASSERT(tpp->p_szc == szc);
4999 5000 PP_SETFREE(tpp);
5000 5001 PP_SETAGED(tpp);
5001 5002 npgs--;
5002 5003 } while ((tpp = tpp->p_next) != *target);
5003 5004 ASSERT(npgs == 0);
5004 5005 page_list_add_pages(*target, 0);
5005 5006 npgs = page_get_pagecnt(szc);
5006 5007 page_create_putback(npgs);
5007 5008 }
5008 5009 return (ret);
5009 5010 }
5010 5011
5011 5012 /*
5012 5013 * it is up to the caller to deal with pcf accounting.
5013 5014 */
5014 5015 void
5015 5016 page_free_replacement_page(page_t *pplist)
5016 5017 {
5017 5018 page_t *pp;
5018 5019
5019 5020 while (pplist != NULL) {
5020 5021 /*
5021 5022 * pp_targ is a linked list.
5022 5023 */
5023 5024 pp = pplist;
5024 5025 if (pp->p_szc == 0) {
5025 5026 page_sub(&pplist, pp);
5026 5027 page_clr_all_props(pp);
5027 5028 PP_SETFREE(pp);
5028 5029 PP_SETAGED(pp);
5029 5030 page_list_add(pp, PG_FREE_LIST | PG_LIST_TAIL);
5030 5031 page_unlock(pp);
5031 5032 VM_STAT_ADD(pagecnt.pc_free_replacement_page[0]);
5032 5033 } else {
5033 5034 spgcnt_t curnpgs = page_get_pagecnt(pp->p_szc);
5034 5035 page_t *tpp;
5035 5036 page_list_break(&pp, &pplist, curnpgs);
5036 5037 tpp = pp;
5037 5038 do {
5038 5039 ASSERT(PAGE_EXCL(tpp));
5039 5040 ASSERT(!hat_page_is_mapped(tpp));
5040 5041 page_clr_all_props(tpp);
5041 5042 PP_SETFREE(tpp);
5042 5043 PP_SETAGED(tpp);
5043 5044 } while ((tpp = tpp->p_next) != pp);
5044 5045 page_list_add_pages(pp, 0);
5045 5046 VM_STAT_ADD(pagecnt.pc_free_replacement_page[1]);
5046 5047 }
5047 5048 }
5048 5049 }
5049 5050
5050 5051 /*
5051 5052 * Relocate target to non-relocatable replacement page.
5052 5053 */
5053 5054 int
5054 5055 page_relocate_cage(page_t **target, page_t **replacement)
5055 5056 {
5056 5057 page_t *tpp, *rpp;
5057 5058 spgcnt_t pgcnt, npgs;
5058 5059 int result;
5059 5060
5060 5061 tpp = *target;
5061 5062
5062 5063 ASSERT(PAGE_EXCL(tpp));
5063 5064 ASSERT(tpp->p_szc == 0);
5064 5065
5065 5066 pgcnt = btop(page_get_pagesize(tpp->p_szc));
5066 5067
5067 5068 do {
5068 5069 (void) page_create_wait(pgcnt, PG_WAIT | PG_NORELOC);
5069 5070 rpp = page_get_replacement_page(tpp, NULL, PGR_NORELOC);
5070 5071 if (rpp == NULL) {
5071 5072 page_create_putback(pgcnt);
5072 5073 kcage_cageout_wakeup();
5073 5074 }
5074 5075 } while (rpp == NULL);
5075 5076
5076 5077 ASSERT(PP_ISNORELOC(rpp));
5077 5078
5078 5079 result = page_relocate(&tpp, &rpp, 0, 1, &npgs, NULL);
5079 5080
5080 5081 if (result == 0) {
5081 5082 *replacement = rpp;
5082 5083 if (pgcnt != npgs)
5083 5084 panic("page_relocate_cage: partial relocation");
5084 5085 }
5085 5086
5086 5087 return (result);
5087 5088 }
5088 5089
5089 5090 /*
5090 5091 * Release the page lock on a page, place on cachelist
5091 5092 * tail if no longer mapped. Caller can let us know if
5092 5093 * the page is known to be clean.
5093 5094 */
5094 5095 int
5095 5096 page_release(page_t *pp, int checkmod)
5096 5097 {
5097 5098 int status;
5098 5099
5099 5100 ASSERT(PAGE_LOCKED(pp) && !PP_ISFREE(pp) &&
5100 5101 (pp->p_vnode != NULL));
5101 5102
5102 5103 if (!hat_page_is_mapped(pp) && !IS_SWAPVP(pp->p_vnode) &&
5103 5104 ((PAGE_SHARED(pp) && page_tryupgrade(pp)) || PAGE_EXCL(pp)) &&
5104 5105 pp->p_lckcnt == 0 && pp->p_cowcnt == 0 &&
5105 5106 !hat_page_is_mapped(pp)) {
5106 5107
5107 5108 /*
5108 5109 * If page is modified, unlock it
5109 5110 *
5110 5111 * (p_nrm & P_MOD) bit has the latest stuff because:
5111 5112 * (1) We found that this page doesn't have any mappings
5112 5113 * _after_ holding SE_EXCL and
5113 5114 * (2) We didn't drop SE_EXCL lock after the check in (1)
5114 5115 */
5115 5116 if (checkmod && hat_ismod(pp)) {
5116 5117 page_unlock(pp);
5117 5118 status = PGREL_MOD;
5118 5119 } else {
5119 5120 /*LINTED: constant in conditional context*/
5120 5121 VN_DISPOSE(pp, B_FREE, 0, kcred);
5121 5122 status = PGREL_CLEAN;
5122 5123 }
5123 5124 } else {
5124 5125 page_unlock(pp);
5125 5126 status = PGREL_NOTREL;
5126 5127 }
5127 5128 return (status);
5128 5129 }
5129 5130
5130 5131 /*
5131 5132 * Given a constituent page, try to demote the large page on the freelist.
5132 5133 *
5133 5134 * Returns nonzero if the page could be demoted successfully. Returns with
5134 5135 * the constituent page still locked.
5135 5136 */
5136 5137 int
5137 5138 page_try_demote_free_pages(page_t *pp)
5138 5139 {
5139 5140 page_t *rootpp = pp;
5140 5141 pfn_t pfn = page_pptonum(pp);
5141 5142 spgcnt_t npgs;
5142 5143 uint_t szc = pp->p_szc;
5143 5144
5144 5145 ASSERT(PP_ISFREE(pp));
5145 5146 ASSERT(PAGE_EXCL(pp));
5146 5147
5147 5148 /*
5148 5149 * Adjust rootpp and lock it, if `pp' is not the base
5149 5150 * constituent page.
5150 5151 */
5151 5152 npgs = page_get_pagecnt(pp->p_szc);
5152 5153 if (npgs == 1) {
5153 5154 return (0);
5154 5155 }
5155 5156
5156 5157 if (!IS_P2ALIGNED(pfn, npgs)) {
5157 5158 pfn = P2ALIGN(pfn, npgs);
5158 5159 rootpp = page_numtopp_nolock(pfn);
5159 5160 }
5160 5161
5161 5162 if (pp != rootpp && !page_trylock(rootpp, SE_EXCL)) {
5162 5163 return (0);
5163 5164 }
5164 5165
5165 5166 if (rootpp->p_szc != szc) {
5166 5167 if (pp != rootpp)
5167 5168 page_unlock(rootpp);
5168 5169 return (0);
5169 5170 }
5170 5171
5171 5172 page_demote_free_pages(rootpp);
5172 5173
5173 5174 if (pp != rootpp)
5174 5175 page_unlock(rootpp);
5175 5176
5176 5177 ASSERT(PP_ISFREE(pp));
5177 5178 ASSERT(PAGE_EXCL(pp));
5178 5179 return (1);
5179 5180 }
5180 5181
5181 5182 /*
5182 5183 * Given a constituent page, try to demote the large page.
5183 5184 *
5184 5185 * Returns nonzero if the page could be demoted successfully. Returns with
5185 5186 * the constituent page still locked.
5186 5187 */
5187 5188 int
5188 5189 page_try_demote_pages(page_t *pp)
5189 5190 {
5190 5191 page_t *tpp, *rootpp = pp;
5191 5192 pfn_t pfn = page_pptonum(pp);
5192 5193 spgcnt_t i, npgs;
5193 5194 uint_t szc = pp->p_szc;
5194 5195 vnode_t *vp = pp->p_vnode;
5195 5196
5196 5197 ASSERT(PAGE_EXCL(pp));
5197 5198
5198 5199 VM_STAT_ADD(pagecnt.pc_try_demote_pages[0]);
5199 5200
5200 5201 if (pp->p_szc == 0) {
5201 5202 VM_STAT_ADD(pagecnt.pc_try_demote_pages[1]);
5202 5203 return (1);
5203 5204 }
5204 5205
5205 5206 if (vp != NULL && !IS_SWAPFSVP(vp) && !VN_ISKAS(vp)) {
5206 5207 VM_STAT_ADD(pagecnt.pc_try_demote_pages[2]);
5207 5208 page_demote_vp_pages(pp);
5208 5209 ASSERT(pp->p_szc == 0);
5209 5210 return (1);
5210 5211 }
5211 5212
5212 5213 /*
5213 5214	 * Adjust rootpp if the page passed in is not the base
5214 5215 * constituent page.
5215 5216 */
5216 5217 npgs = page_get_pagecnt(pp->p_szc);
5217 5218 ASSERT(npgs > 1);
5218 5219 if (!IS_P2ALIGNED(pfn, npgs)) {
5219 5220 pfn = P2ALIGN(pfn, npgs);
5220 5221 rootpp = page_numtopp_nolock(pfn);
5221 5222 VM_STAT_ADD(pagecnt.pc_try_demote_pages[3]);
5222 5223 ASSERT(rootpp->p_vnode != NULL);
5223 5224 ASSERT(rootpp->p_szc == szc);
5224 5225 }
5225 5226
5226 5227 /*
5227 5228 * We can't demote kernel pages since we can't hat_unload()
5228 5229 * the mappings.
5229 5230 */
5230 5231 if (VN_ISKAS(rootpp->p_vnode))
5231 5232 return (0);
5232 5233
5233 5234 /*
5234 5235 * Attempt to lock all constituent pages except the page passed
5235 5236 * in since it's already locked.
5236 5237 */
5237 5238 for (tpp = rootpp, i = 0; i < npgs; i++, tpp++) {
5238 5239 ASSERT(!PP_ISFREE(tpp));
5239 5240 ASSERT(tpp->p_vnode != NULL);
5240 5241
5241 5242 if (tpp != pp && !page_trylock(tpp, SE_EXCL))
5242 5243 break;
5243 5244 ASSERT(tpp->p_szc == rootpp->p_szc);
5244 5245 ASSERT(page_pptonum(tpp) == page_pptonum(rootpp) + i);
5245 5246 }
5246 5247
5247 5248 /*
5248 5249 * If we failed to lock them all then unlock what we have
5249 5250 * locked so far and bail.
5250 5251 */
5251 5252 if (i < npgs) {
5252 5253 tpp = rootpp;
5253 5254 while (i-- > 0) {
5254 5255 if (tpp != pp)
5255 5256 page_unlock(tpp);
5256 5257 tpp++;
5257 5258 }
5258 5259 VM_STAT_ADD(pagecnt.pc_try_demote_pages[4]);
5259 5260 return (0);
5260 5261 }
5261 5262
5262 5263 for (tpp = rootpp, i = 0; i < npgs; i++, tpp++) {
5263 5264 ASSERT(PAGE_EXCL(tpp));
5264 5265 ASSERT(tpp->p_slckcnt == 0);
5265 5266 (void) hat_pageunload(tpp, HAT_FORCE_PGUNLOAD);
5266 5267 tpp->p_szc = 0;
5267 5268 }
5268 5269
5269 5270 /*
5270 5271 * Unlock all pages except the page passed in.
5271 5272 */
5272 5273 for (tpp = rootpp, i = 0; i < npgs; i++, tpp++) {
5273 5274 ASSERT(!hat_page_is_mapped(tpp));
5274 5275 if (tpp != pp)
5275 5276 page_unlock(tpp);
5276 5277 }
5277 5278
5278 5279 VM_STAT_ADD(pagecnt.pc_try_demote_pages[5]);
5279 5280 return (1);
5280 5281 }
5281 5282
5282 5283 /*
5283 5284 * Called by page_free() and page_destroy() to demote the page size code
5284 5285 * (p_szc) to 0 (since we can't just put a single PAGESIZE page with non zero
5285 5286 * p_szc on free list, neither can we just clear p_szc of a single page_t
5286 5287 * within a large page since it will break other code that relies on p_szc
5287 5288 * being the same for all page_t's of a large page). Anonymous pages should
5288 5289 * never end up here because anon_map_getpages() cannot deal with p_szc
5289 5290 * changes after a single constituent page is locked. While anonymous or
5290 5291 * kernel large pages are demoted or freed an entire large page at a time
5291 5292 * with all constituent pages locked EXCL, for file system pages we
5292 5293 * have to be able to demote a large page (i.e. decrease all constituent pages'
5293 5294 * p_szc) with just an EXCL lock on one of the constituent pages. The reason
5294 5295 * we can easily deal with anonymous page demotion an entire large page at a
5295 5296 * time is that those operations originate at the address space level and concern
5296 5297 * the entire large page region with actual demotion only done when pages are
5297 5298 * not shared with any other processes (therefore we can always get EXCL lock
5298 5299 * on all anonymous constituent pages after clearing segment page
5299 5300 * cache). However file system pages can be truncated or invalidated at a
5300 5301 * PAGESIZE level from the file system side and end up in page_free() or
5301 5302 * page_destroy() (we also allow only part of the large page to be SOFTLOCKed
5302 5303 * and therefore pageout should be able to demote a large page by EXCL locking
5303 5304 * any constituent page that is not under SOFTLOCK). In those cases we cannot
5304 5305 * rely on being able to lock EXCL all constituent pages.
5305 5306 *
5306 5307 * To prevent szc changes on file system pages one has to lock all constituent
5307 5308 * pages at least SHARED (or call page_szc_lock()). The only subsystem that
5308 5309 * doesn't rely on locking all constituent pages (or using page_szc_lock()) to
5309 5310 * prevent szc changes is the hat layer, which uses its own page level mlist
5310 5311 * locks. The hat assumes that szc doesn't change after the mlist lock for a page is
5311 5312 * taken. Therefore we need to change szc under hat level locks if we only
5312 5313 * have an EXCL lock on a single constituent page and hat still references any
5313 5314 * of constituent pages. (Note we can't "ignore" hat layer by simply
5314 5315 * hat_pageunload() all constituent pages without having EXCL locks on all of
5315 5316 * constituent pages). We use hat_page_demote() call to safely demote szc of
5316 5317 * all constituent pages under hat locks when we only have an EXCL lock on one
5317 5318 * of constituent pages.
5318 5319 *
5319 5320 * This routine calls page_szc_lock() before calling hat_page_demote() to
5320 5321 * allow segvn in one special case not to lock all constituent pages SHARED
5321 5322 * before calling hat_memload_array() that relies on p_szc not changing even
5322 5323 * before hat level mlist lock is taken. In that case segvn uses
5323 5324 * page_szc_lock() to prevent hat_page_demote() changing p_szc values.
5324 5325 *
5325 5326 * Anonymous or kernel page demotion still has to lock all pages exclusively
5326 5327 * and do hat_pageunload() on all constituent pages before demoting the page
5327 5328 * therefore there's no need for anonymous or kernel page demotion to use
5328 5329 * hat_page_demote() mechanism.
5329 5330 *
5330 5331 * hat_page_demote() removes all large mappings that map pp and then decreases
5331 5332 * p_szc starting from the last constituent page of the large page. Working
5332 5333 * from the tail of a large page in pfn-decreasing order allows anyone looking at
5333 5334 * the root page to know that hat_page_demote() is done for the root's szc area.
5334 5335 * e.g. if a root page has szc 1, one knows it only has to lock all constituent
5335 5336 * pages within szc 1 area to prevent szc changes because hat_page_demote()
5336 5337 * that started on this page when it had szc > 1 is done for this szc 1 area.
5337 5338 *
5338 5339 * We are guaranteed that all constituent pages of pp's large page belong to
5339 5340 * the same vnode with the consecutive offsets increasing in the direction of
5340 5341 * the pfn i.e. the identity of constituent pages can't change until their
5341 5342 * p_szc is decreased. Therefore it's safe for hat_page_demote() to remove
5342 5343 * large mappings to pp even though we don't lock any constituent page except
5343 5344 * pp (i.e. we won't unload e.g. kernel locked page).
5344 5345 */
5345 5346 static void
5346 5347 page_demote_vp_pages(page_t *pp)
5347 5348 {
5348 5349 kmutex_t *mtx;
5349 5350
5350 5351 ASSERT(PAGE_EXCL(pp));
5351 5352 ASSERT(!PP_ISFREE(pp));
5352 5353 ASSERT(pp->p_vnode != NULL);
5353 5354 ASSERT(!IS_SWAPFSVP(pp->p_vnode));
5354 5355 ASSERT(!PP_ISKAS(pp));
5355 5356
5356 5357 VM_STAT_ADD(pagecnt.pc_demote_pages[0]);
5357 5358
5358 5359 mtx = page_szc_lock(pp);
5359 5360 if (mtx != NULL) {
5360 5361 hat_page_demote(pp);
5361 5362 mutex_exit(mtx);
5362 5363 }
5363 5364 ASSERT(pp->p_szc == 0);
5364 5365 }
5365 5366
5366 5367 /*
5367 5368 * Mark any existing pages for migration in the given range
5368 5369 */
5369 5370 void
5370 5371 page_mark_migrate(struct seg *seg, caddr_t addr, size_t len,
5371 5372 struct anon_map *amp, ulong_t anon_index, vnode_t *vp,
5372 5373 u_offset_t vnoff, int rflag)
5373 5374 {
5374 5375 struct anon *ap;
5375 5376 vnode_t *curvp;
5376 5377 lgrp_t *from;
5377 5378 pgcnt_t nlocked;
5378 5379 u_offset_t off;
5379 5380 pfn_t pfn;
5380 5381 size_t pgsz;
5381 5382 size_t segpgsz;
5382 5383 pgcnt_t pages;
5383 5384 uint_t pszc;
5384 5385 page_t *pp0, *pp;
5385 5386 caddr_t va;
5386 5387 ulong_t an_idx;
5387 5388 anon_sync_obj_t cookie;
5388 5389
5389 5390 ASSERT(seg->s_as && AS_LOCK_HELD(seg->s_as));
5390 5391
5391 5392 /*
5392 5393 * Don't do anything if we don't need to do lgroup optimizations
5393 5394 * on this system
5394 5395 */
5395 5396 if (!lgrp_optimizations())
5396 5397 return;
5397 5398
5398 5399 /*
5399 5400 * Align address and length to (potentially large) page boundary
5400 5401 */
5401 5402 segpgsz = page_get_pagesize(seg->s_szc);
5402 5403 addr = (caddr_t)P2ALIGN((uintptr_t)addr, segpgsz);
5403 5404 if (rflag)
5404 5405 len = P2ROUNDUP(len, segpgsz);
5405 5406
5406 5407 /*
5407 5408 * Do one (large) page at a time
5408 5409 */
5409 5410 va = addr;
5410 5411 while (va < addr + len) {
5411 5412 /*
5412 5413 * Lookup (root) page for vnode and offset corresponding to
5413 5414 * this virtual address
5414 5415 * Try anonmap first since there may be copy-on-write
5415 5416 * pages, but initialize vnode pointer and offset using
5416 5417 * vnode arguments just in case there isn't an amp.
5417 5418 */
5418 5419 curvp = vp;
5419 5420 off = vnoff + va - seg->s_base;
5420 5421 if (amp) {
5421 5422 ANON_LOCK_ENTER(&amp->a_rwlock, RW_READER);
5422 5423 an_idx = anon_index + seg_page(seg, va);
5423 5424 anon_array_enter(amp, an_idx, &cookie);
5424 5425 ap = anon_get_ptr(amp->ahp, an_idx);
5425 5426 if (ap)
5426 5427 swap_xlate(ap, &curvp, &off);
5427 5428 anon_array_exit(&cookie);
5428 5429 ANON_LOCK_EXIT(&amp->a_rwlock);
5429 5430 }
5430 5431
5431 5432 pp = NULL;
5432 5433 if (curvp)
5433 5434 pp = page_lookup(curvp, off, SE_SHARED);
5434 5435
5435 5436 /*
5436 5437 * If there isn't a page at this virtual address,
5437 5438 * skip to next page
5438 5439 */
5439 5440 if (pp == NULL) {
5440 5441 va += PAGESIZE;
5441 5442 continue;
5442 5443 }
5443 5444
5444 5445 /*
5445 5446 * Figure out which lgroup this page is in for kstats
5446 5447 */
5447 5448 pfn = page_pptonum(pp);
5448 5449 from = lgrp_pfn_to_lgrp(pfn);
5449 5450
5450 5451 /*
5451 5452 * Get page size, and round up and skip to next page boundary
5452 5453 * if unaligned address
5453 5454 */
5454 5455 pszc = pp->p_szc;
5455 5456 pgsz = page_get_pagesize(pszc);
5456 5457 pages = btop(pgsz);
5457 5458 if (!IS_P2ALIGNED(va, pgsz) ||
5458 5459 !IS_P2ALIGNED(pfn, pages) ||
5459 5460 pgsz > segpgsz) {
5460 5461 pgsz = MIN(pgsz, segpgsz);
5461 5462 page_unlock(pp);
5462 5463 pages = btop(P2END((uintptr_t)va, pgsz) -
5463 5464 (uintptr_t)va);
5464 5465 va = (caddr_t)P2END((uintptr_t)va, pgsz);
5465 5466 lgrp_stat_add(from->lgrp_id, LGRP_PMM_FAIL_PGS, pages);
5466 5467 continue;
5467 5468 }
5468 5469
5469 5470 /*
5470 5471 * Upgrade to exclusive lock on page
5471 5472 */
5472 5473 if (!page_tryupgrade(pp)) {
5473 5474 page_unlock(pp);
5474 5475 va += pgsz;
5475 5476 lgrp_stat_add(from->lgrp_id, LGRP_PMM_FAIL_PGS,
5476 5477 btop(pgsz));
5477 5478 continue;
5478 5479 }
5479 5480
5480 5481 pp0 = pp++;
5481 5482 nlocked = 1;
5482 5483
5483 5484 /*
5484 5485 * Lock constituent pages if this is large page
5485 5486 */
5486 5487 if (pages > 1) {
5487 5488 /*
5488 5489 * Lock all constituents except root page, since it
5489 5490 * should be locked already.
5490 5491 */
5491 5492 for (; nlocked < pages; nlocked++) {
5492 5493 if (!page_trylock(pp, SE_EXCL)) {
5493 5494 break;
5494 5495 }
5495 5496 if (PP_ISFREE(pp) ||
5496 5497 pp->p_szc != pszc) {
5497 5498 /*
5498 5499 * hat_page_demote() raced in with us.
5499 5500 */
5500 5501 ASSERT(!IS_SWAPFSVP(curvp));
5501 5502 page_unlock(pp);
5502 5503 break;
5503 5504 }
5504 5505 pp++;
5505 5506 }
5506 5507 }
5507 5508
5508 5509 /*
5509 5510 * If all constituent pages couldn't be locked,
5510 5511 * unlock pages locked so far and skip to next page.
5511 5512 */
5512 5513 if (nlocked < pages) {
5513 5514 while (pp0 < pp) {
5514 5515 page_unlock(pp0++);
5515 5516 }
5516 5517 va += pgsz;
5517 5518 lgrp_stat_add(from->lgrp_id, LGRP_PMM_FAIL_PGS,
5518 5519 btop(pgsz));
5519 5520 continue;
5520 5521 }
5521 5522
5522 5523 /*
5523 5524 * hat_page_demote() can no longer happen
5524 5525 * since the last constituent page had the right p_szc after
5525 5526 * all constituent pages were locked. All constituent pages
5526 5527 * should now have the same p_szc.
5527 5528 */
5528 5529
5529 5530 /*
5530 5531 * All constituent pages locked successfully, so mark
5531 5532 * large page for migration and unload the mappings of
5532 5533 * constituent pages, so a fault will occur on any part of the
5533 5534 * large page
5534 5535 */
5535 5536 PP_SETMIGRATE(pp0);
5536 5537 while (pp0 < pp) {
5537 5538 (void) hat_pageunload(pp0, HAT_FORCE_PGUNLOAD);
5538 5539 ASSERT(hat_page_getshare(pp0) == 0);
5539 5540 page_unlock(pp0++);
5540 5541 }
5541 5542 lgrp_stat_add(from->lgrp_id, LGRP_PMM_PGS, nlocked);
5542 5543
5543 5544 va += pgsz;
5544 5545 }
5545 5546 }
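
/*
 * Illustrative sketch (hypothetical, not part of this file) of how the
 * mark/migrate pair is intended to be used by a segment driver: mark a
 * range when an lgroup placement decision is made, then call
 * page_migrate() at fault time once the constituent pages are held
 * locked (at least SE_SHARED) in a ppa[] array.  Everything here other
 * than the two calls is a placeholder; in practice the two steps happen
 * in separate code paths.
 */
static void
example_mark_then_migrate(struct seg *seg, caddr_t addr, size_t len,
    struct anon_map *amp, ulong_t anon_index, vnode_t *vp, u_offset_t vnoff,
    page_t **ppa, pgcnt_t npages)
{
	/* Mark existing pages in [addr, addr + len) for later migration. */
	page_mark_migrate(seg, addr, len, amp, anon_index, vp, vnoff, 1);

	/*
	 * Later, at fault time, with ppa[] holding npages locked pages,
	 * migrate whatever was marked above.
	 */
	page_migrate(seg, addr, ppa, npages);
}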
5546 5547
5547 5548 /*
5548 5549 * Migrate any pages that have been marked for migration in the given range
5549 5550 */
5550 5551 void
5551 5552 page_migrate(
5552 5553 struct seg *seg,
5553 5554 caddr_t addr,
5554 5555 page_t **ppa,
5555 5556 pgcnt_t npages)
5556 5557 {
5557 5558 lgrp_t *from;
5558 5559 lgrp_t *to;
5559 5560 page_t *newpp;
5560 5561 page_t *pp;
5561 5562 pfn_t pfn;
5562 5563 size_t pgsz;
5563 5564 spgcnt_t page_cnt;
5564 5565 spgcnt_t i;
5565 5566 uint_t pszc;
5566 5567
5567 5568 ASSERT(seg->s_as && AS_LOCK_HELD(seg->s_as));
5568 5569
5569 5570 while (npages > 0) {
5570 5571 pp = *ppa;
5571 5572 pszc = pp->p_szc;
5572 5573 pgsz = page_get_pagesize(pszc);
5573 5574 page_cnt = btop(pgsz);
5574 5575
5575 5576 /*
5576 5577 * Check to see whether this page is marked for migration
5577 5578 *
5578 5579 * Assume that root page of large page is marked for
5579 5580 * migration and none of the other constituent pages
5580 5581 * are marked. This really simplifies clearing the
5581 5582 * migrate bit by not having to clear it from each
5582 5583 * constituent page.
5583 5584 *
5584 5585 * note we don't want to relocate an entire large page if
5585 5586 * someone is only using one subpage.
5586 5587 */
5587 5588 if (npages < page_cnt)
5588 5589 break;
5589 5590
5590 5591 /*
5591 5592 * Is it marked for migration?
5592 5593 */
5593 5594 if (!PP_ISMIGRATE(pp))
5594 5595 goto next;
5595 5596
5596 5597 /*
5597 5598 * Determine lgroups that page is being migrated between
5598 5599 */
5599 5600 pfn = page_pptonum(pp);
5600 5601 if (!IS_P2ALIGNED(pfn, page_cnt)) {
5601 5602 break;
5602 5603 }
5603 5604 from = lgrp_pfn_to_lgrp(pfn);
5604 5605 to = lgrp_mem_choose(seg, addr, pgsz);
5605 5606
5606 5607 /*
5607 5608 * Need to get exclusive locks to migrate
5608 5609 */
5609 5610 for (i = 0; i < page_cnt; i++) {
5610 5611 ASSERT(PAGE_LOCKED(ppa[i]));
5611 5612 if (page_pptonum(ppa[i]) != pfn + i ||
5612 5613 ppa[i]->p_szc != pszc) {
5613 5614 break;
5614 5615 }
5615 5616 if (!page_tryupgrade(ppa[i])) {
5616 5617 lgrp_stat_add(from->lgrp_id,
5617 5618 LGRP_PM_FAIL_LOCK_PGS,
5618 5619 page_cnt);
5619 5620 break;
5620 5621 }
5621 5622
5622 5623 /*
5623 5624 * Check to see whether we are trying to migrate
5624 5625 * page to lgroup where it is allocated already.
5625 5626 * If so, clear the migrate bit and skip to next
5626 5627 * page.
5627 5628 */
5628 5629 if (i == 0 && to == from) {
5629 5630 PP_CLRMIGRATE(ppa[0]);
5630 5631 page_downgrade(ppa[0]);
5631 5632 goto next;
5632 5633 }
5633 5634 }
5634 5635
5635 5636 /*
5636 5637 * If all constituent pages couldn't be locked,
5637 5638 * unlock pages locked so far and skip to next page.
5638 5639 */
5639 5640 if (i != page_cnt) {
5640 5641 while (--i != -1) {
5641 5642 page_downgrade(ppa[i]);
5642 5643 }
5643 5644 goto next;
5644 5645 }
5645 5646
5646 5647 (void) page_create_wait(page_cnt, PG_WAIT);
5647 5648 newpp = page_get_replacement_page(pp, to, PGR_SAMESZC);
5648 5649 if (newpp == NULL) {
5649 5650 page_create_putback(page_cnt);
5650 5651 for (i = 0; i < page_cnt; i++) {
5651 5652 page_downgrade(ppa[i]);
5652 5653 }
5653 5654 lgrp_stat_add(to->lgrp_id, LGRP_PM_FAIL_ALLOC_PGS,
5654 5655 page_cnt);
5655 5656 goto next;
5656 5657 }
5657 5658 ASSERT(newpp->p_szc == pszc);
5658 5659 /*
5659 5660 * Clear migrate bit and relocate page
5660 5661 */
5661 5662 PP_CLRMIGRATE(pp);
5662 5663 if (page_relocate(&pp, &newpp, 0, 1, &page_cnt, to)) {
5663 5664 panic("page_migrate: page_relocate failed");
5664 5665 }
5665 5666 ASSERT(page_cnt * PAGESIZE == pgsz);
5666 5667
5667 5668 /*
5668 5669 * Keep stats for number of pages migrated from and to
5669 5670 * each lgroup
5670 5671 */
5671 5672 lgrp_stat_add(from->lgrp_id, LGRP_PM_SRC_PGS, page_cnt);
5672 5673 lgrp_stat_add(to->lgrp_id, LGRP_PM_DEST_PGS, page_cnt);
5673 5674 /*
5674 5675 * update the page_t array we were passed in and
5675 5676 * unlink constituent pages of a large page.
5676 5677 */
5677 5678 for (i = 0; i < page_cnt; ++i, ++pp) {
5678 5679 ASSERT(PAGE_EXCL(newpp));
5679 5680 ASSERT(newpp->p_szc == pszc);
5680 5681 ppa[i] = newpp;
5681 5682 pp = newpp;
5682 5683 page_sub(&newpp, pp);
5683 5684 page_downgrade(pp);
5684 5685 }
5685 5686 ASSERT(newpp == NULL);
5686 5687 next:
5687 5688 addr += pgsz;
5688 5689 ppa += page_cnt;
5689 5690 npages -= page_cnt;
5690 5691 }
5691 5692 }
5692 5693
5693 5694 uint_t page_reclaim_maxcnt = 60; /* max total iterations */
5694 5695 uint_t page_reclaim_nofree_maxcnt = 3; /* max iterations without progress */
5695 5696 /*
5696 5697 * Reclaim/reserve availrmem for npages.
5697 5698 * If there is not enough memory start reaping seg, kmem caches.
5698 5699 * Start pageout scanner (via page_needfree()).
5699 5700 * Exit after roughly page_reclaim_maxcnt seconds regardless of how much memory has been released.
5700 5701 * Note: There is no guarantee that any availrmem will be freed as
5701 5702 * this memory typically is locked (kernel heap) or reserved for swap.
5702 5703 * Also, due to memory fragmentation, the kmem allocator may not be able
5703 5704 * to free any memory (a single user-allocated buffer can prevent
5704 5705 * freeing a slab or a page).
5705 5706 */
5706 5707 int
5707 5708 page_reclaim_mem(pgcnt_t npages, pgcnt_t epages, int adjust)
5708 5709 {
5709 5710 int i = 0;
5710 5711 int i_nofree = 0;
5711 5712 int ret = 0;
5712 5713 pgcnt_t deficit;
5713 5714 pgcnt_t old_availrmem = 0;
5714 5715
5715 5716 mutex_enter(&freemem_lock);
5716 5717 while (availrmem < tune.t_minarmem + npages + epages &&
5717 5718 i++ < page_reclaim_maxcnt) {
5718 5719 /* ensure we made some progress in the last few iterations */
5719 5720 if (old_availrmem < availrmem) {
5720 5721 old_availrmem = availrmem;
5721 5722 i_nofree = 0;
5722 5723 } else if (i_nofree++ >= page_reclaim_nofree_maxcnt) {
5723 5724 break;
5724 5725 }
5725 5726
5726 5727 deficit = tune.t_minarmem + npages + epages - availrmem;
5727 5728 mutex_exit(&freemem_lock);
5728 5729 page_needfree(deficit);
5729 5730 kmem_reap();
5730 5731 delay(hz);
5731 5732 page_needfree(-(spgcnt_t)deficit);
5732 5733 mutex_enter(&freemem_lock);
5733 5734 }
5734 5735
5735 5736 if (adjust && (availrmem >= tune.t_minarmem + npages + epages)) {
5736 5737 availrmem -= npages;
5737 5738 ret = 1;
5738 5739 }
5739 5740
5740 5741 mutex_exit(&freemem_lock);
5741 5742
5742 5743 return (ret);
5743 5744 }
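
/*
 * Illustrative sketch (hypothetical, not part of this file): a typical
 * consumer reserves npages of availrmem up front via page_reclaim_mem()
 * with adjust set, and gives the reservation back when done.  The
 * example_* names are placeholders.
 */
static int
example_reserve_availrmem(pgcnt_t npages)
{
	/* Reserve npages (no extra headroom); availrmem is decremented. */
	if (page_reclaim_mem(npages, 0, 1) == 0)
		return (ENOMEM);
	return (0);
}

static void
example_unreserve_availrmem(pgcnt_t npages)
{
	/* Return the reservation taken above. */
	mutex_enter(&freemem_lock);
	availrmem += npages;
	mutex_exit(&freemem_lock);
}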
5744 5745
5745 5746 /*
5746 5747 * Search the memory segments to locate the desired page. Within a
5747 5748 * segment, pages increase linearly with one page structure per
5748 5749 * physical page frame (size PAGESIZE). The search begins
5749 5750 * with the segment that was accessed last, to take advantage of locality.
5750 5751 * If the hint misses, we start from the beginning of the sorted memseg list.
5751 5752 */
5752 5753
5753 5754
5754 5755 /*
5755 5756 * Some data structures for pfn to pp lookup.
5756 5757 */
5757 5758 ulong_t mhash_per_slot;
5758 5759 struct memseg *memseg_hash[N_MEM_SLOTS];
5759 5760
5760 5761 page_t *
5761 5762 page_numtopp_nolock(pfn_t pfnum)
5762 5763 {
5763 5764 struct memseg *seg;
5764 5765 page_t *pp;
5765 5766 vm_cpu_data_t *vc;
5766 5767
5767 5768 /*
5768 5769 * We need to disable kernel preemption while referencing the
5769 5770 * cpu_vm_data field in order to prevent us from being switched to
5770 5771 * another cpu and trying to reference it after it has been freed.
5771 5772 * This will keep us on cpu and prevent it from being removed while
5772 5773 * we are still on it.
5773 5774 *
5774 5775 * We may be caching a memseg in vc_pnum_memseg/vc_pnext_memseg
5775 5776 * which is being reused by DR, which will flush those references
5776 5777 * before modifying the reused memseg. See memseg_cpu_vm_flush().
5777 5778 */
5778 5779 kpreempt_disable();
5779 5780 vc = CPU->cpu_vm_data;
5780 5781 ASSERT(vc != NULL);
5781 5782
5782 5783 MEMSEG_STAT_INCR(nsearch);
5783 5784
5784 5785 /* Try last winner first */
5785 5786 if (((seg = vc->vc_pnum_memseg) != NULL) &&
5786 5787 (pfnum >= seg->pages_base) && (pfnum < seg->pages_end)) {
5787 5788 MEMSEG_STAT_INCR(nlastwon);
5788 5789 pp = seg->pages + (pfnum - seg->pages_base);
5789 5790 if (pp->p_pagenum == pfnum) {
5790 5791 kpreempt_enable();
5791 5792 return ((page_t *)pp);
5792 5793 }
5793 5794 }
5794 5795
5795 5796 /* Else Try hash */
5796 5797 if (((seg = memseg_hash[MEMSEG_PFN_HASH(pfnum)]) != NULL) &&
5797 5798 (pfnum >= seg->pages_base) && (pfnum < seg->pages_end)) {
5798 5799 MEMSEG_STAT_INCR(nhashwon);
5799 5800 vc->vc_pnum_memseg = seg;
5800 5801 pp = seg->pages + (pfnum - seg->pages_base);
5801 5802 if (pp->p_pagenum == pfnum) {
5802 5803 kpreempt_enable();
5803 5804 return ((page_t *)pp);
5804 5805 }
5805 5806 }
5806 5807
5807 5808 /* Else Brute force */
5808 5809 for (seg = memsegs; seg != NULL; seg = seg->next) {
5809 5810 if (pfnum >= seg->pages_base && pfnum < seg->pages_end) {
5810 5811 vc->vc_pnum_memseg = seg;
5811 5812 pp = seg->pages + (pfnum - seg->pages_base);
5812 5813 if (pp->p_pagenum == pfnum) {
5813 5814 kpreempt_enable();
5814 5815 return ((page_t *)pp);
5815 5816 }
5816 5817 }
5817 5818 }
5818 5819 vc->vc_pnum_memseg = NULL;
5819 5820 kpreempt_enable();
5820 5821 MEMSEG_STAT_INCR(nnotfound);
5821 5822 return ((page_t *)NULL);
5822 5823
5823 5824 }
5824 5825
5825 5826 struct memseg *
5826 5827 page_numtomemseg_nolock(pfn_t pfnum)
5827 5828 {
5828 5829 struct memseg *seg;
5829 5830 page_t *pp;
5830 5831
5831 5832 /*
5832 5833 * We may be caching a memseg in vc_pnum_memseg/vc_pnext_memseg
5833 5834 * which is being reused by DR, which will flush those references
5834 5835 * before modifying the reused memseg. See memseg_cpu_vm_flush().
5835 5836 */
5836 5837 kpreempt_disable();
5837 5838 /* Try hash */
5838 5839 if (((seg = memseg_hash[MEMSEG_PFN_HASH(pfnum)]) != NULL) &&
5839 5840 (pfnum >= seg->pages_base) && (pfnum < seg->pages_end)) {
5840 5841 pp = seg->pages + (pfnum - seg->pages_base);
5841 5842 if (pp->p_pagenum == pfnum) {
5842 5843 kpreempt_enable();
5843 5844 return (seg);
5844 5845 }
5845 5846 }
5846 5847
5847 5848 /* Else Brute force */
5848 5849 for (seg = memsegs; seg != NULL; seg = seg->next) {
5849 5850 if (pfnum >= seg->pages_base && pfnum < seg->pages_end) {
5850 5851 pp = seg->pages + (pfnum - seg->pages_base);
5851 5852 if (pp->p_pagenum == pfnum) {
5852 5853 kpreempt_enable();
5853 5854 return (seg);
5854 5855 }
5855 5856 }
5856 5857 }
5857 5858 kpreempt_enable();
5858 5859 return ((struct memseg *)NULL);
5859 5860 }
5860 5861
5861 5862 /*
5862 5863 * Given a page and a count return the page struct that is
5863 5864 * n structs away from the current one in the global page
5864 5865 * list.
5865 5866 *
5866 5867 * This function wraps to the first page upon
5867 5868 * reaching the end of the memseg list.
5868 5869 */
5869 5870 page_t *
5870 5871 page_nextn(page_t *pp, ulong_t n)
5871 5872 {
5872 5873 struct memseg *seg;
5873 5874 page_t *ppn;
5874 5875 vm_cpu_data_t *vc;
5875 5876
5876 5877 /*
5877 5878 * We need to disable kernel preemption while referencing the
5878 5879 * cpu_vm_data field in order to prevent us from being switched to
5879 5880 * another cpu and trying to reference it after it has been freed.
5880 5881 * This will keep us on cpu and prevent it from being removed while
5881 5882 * we are still on it.
5882 5883 *
5883 5884 * We may be caching a memseg in vc_pnum_memseg/vc_pnext_memseg
5884 5885 * which is being reused by DR, which will flush those references
5885 5886 * before modifying the reused memseg. See memseg_cpu_vm_flush().
5886 5887 */
5887 5888 kpreempt_disable();
5888 5889 vc = (vm_cpu_data_t *)CPU->cpu_vm_data;
5889 5890
5890 5891 ASSERT(vc != NULL);
5891 5892
5892 5893 if (((seg = vc->vc_pnext_memseg) == NULL) ||
5893 5894 (seg->pages_base == seg->pages_end) ||
5894 5895 !(pp >= seg->pages && pp < seg->epages)) {
5895 5896
5896 5897 for (seg = memsegs; seg; seg = seg->next) {
5897 5898 if (pp >= seg->pages && pp < seg->epages)
5898 5899 break;
5899 5900 }
5900 5901
5901 5902 if (seg == NULL) {
5902 5903 /* Memory delete got in, return something valid. */
5903 5904 /* TODO: fix me. */
5904 5905 seg = memsegs;
5905 5906 pp = seg->pages;
5906 5907 }
5907 5908 }
5908 5909
5909 5910 /* check for wraparound - possible if n is large */
5910 5911 while ((ppn = (pp + n)) >= seg->epages || ppn < pp) {
5911 5912 n -= seg->epages - pp;
5912 5913 seg = seg->next;
5913 5914 if (seg == NULL)
5914 5915 seg = memsegs;
5915 5916 pp = seg->pages;
5916 5917 }
5917 5918 vc->vc_pnext_memseg = seg;
5918 5919 kpreempt_enable();
5919 5920 return (ppn);
5920 5921 }
5921 5922
5922 5923 /*
5923 5924 * Initialize for a loop using page_next_scan_large().
5924 5925 */
5925 5926 page_t *
5926 5927 page_next_scan_init(void **cookie)
5927 5928 {
5928 5929 ASSERT(cookie != NULL);
5929 5930 *cookie = (void *)memsegs;
5930 5931 return ((page_t *)memsegs->pages);
5931 5932 }
5932 5933
5933 5934 /*
5934 5935 * Return the next page in a scan of page_t's, assuming we want
5935 5936 * to skip over sub-pages within larger page sizes.
5936 5937 *
5937 5938 * The cookie is used to keep track of the current memseg.
5938 5939 */
5939 5940 page_t *
5940 5941 page_next_scan_large(
5941 5942 page_t *pp,
5942 5943 ulong_t *n,
5943 5944 void **cookie)
5944 5945 {
5945 5946 struct memseg *seg = (struct memseg *)*cookie;
5946 5947 page_t *new_pp;
5947 5948 ulong_t cnt;
5948 5949 pfn_t pfn;
5949 5950
5950 5951
5951 5952 /*
5952 5953 * get the count of page_t's to skip based on the page size
5953 5954 */
5954 5955 ASSERT(pp != NULL);
5955 5956 if (pp->p_szc == 0) {
5956 5957 cnt = 1;
5957 5958 } else {
5958 5959 pfn = page_pptonum(pp);
5959 5960 cnt = page_get_pagecnt(pp->p_szc);
5960 5961 cnt -= pfn & (cnt - 1);
5961 5962 }
5962 5963 *n += cnt;
5963 5964 new_pp = pp + cnt;
5964 5965
5965 5966 /*
5966 5967 * Catch if we went past the end of the current memory segment. If so,
5967 5968 * just move to the next segment with pages.
5968 5969 */
5969 5970 if (new_pp >= seg->epages || seg->pages_base == seg->pages_end) {
5970 5971 do {
5971 5972 seg = seg->next;
5972 5973 if (seg == NULL)
5973 5974 seg = memsegs;
5974 5975 } while (seg->pages_base == seg->pages_end);
5975 5976 new_pp = seg->pages;
5976 5977 *cookie = (void *)seg;
5977 5978 }
5978 5979
5979 5980 return (new_pp);
5980 5981 }
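
/*
 * Illustrative sketch (hypothetical, not part of this file): a scan loop
 * using the two functions above.  Each large page root is visited once;
 * *n accumulates the number of page_t's skipped so the loop can stop
 * after one full pass over total_pages.
 */
static void
example_scan_large_pages(void)
{
	void *cookie;
	ulong_t n = 0;
	page_t *pp;

	pp = page_next_scan_init(&cookie);
	while (n < total_pages) {
		/* examine pp (the root of a p_szc sized chunk) here */
		pp = page_next_scan_large(pp, &n, &cookie);
	}
}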
5981 5982
5982 5983
5983 5984 /*
5984 5985 * Returns next page in list. Note: this function wraps
5985 5986 * to the first page in the list upon reaching the end
5986 5987 * of the list. Callers should be aware of this fact.
5987 5988 */
5988 5989
5989 5990 /* We should change this to be a #define */
5990 5991
5991 5992 page_t *
5992 5993 page_next(page_t *pp)
5993 5994 {
5994 5995 return (page_nextn(pp, 1));
5995 5996 }
5996 5997
5997 5998 page_t *
5998 5999 page_first()
5999 6000 {
6000 6001 return ((page_t *)memsegs->pages);
6001 6002 }
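
/*
 * Illustrative sketch (hypothetical, not part of this file): walking
 * every page_t once with page_first()/page_next().  Because page_next()
 * wraps to the first page at the end of the memseg list, the loop is
 * bounded by total_pages rather than by a NULL return.
 */
static void
example_walk_all_pages(void)
{
	page_t *pp = page_first();
	pgcnt_t i;

	for (i = 0; i < total_pages; i++) {
		/* inspect pp here (no lock is held on it) */
		pp = page_next(pp);
	}
}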
6002 6003
6003 6004
6004 6005 /*
6005 6006 * This routine is called at boot with the initial memory configuration
6006 6007 * and when memory is added or removed.
6007 6008 */
6008 6009 void
6009 6010 build_pfn_hash()
6010 6011 {
6011 6012 pfn_t cur;
6012 6013 pgcnt_t index;
6013 6014 struct memseg *pseg;
6014 6015 int i;
6015 6016
6016 6017 /*
6017 6018 * Clear memseg_hash array.
6018 6019 * Since memory add/delete is designed to operate concurrently
6019 6020 * with normal operation, the hash rebuild must be able to run
6020 6021 * concurrently with page_numtopp_nolock(). To support this
6021 6022 * functionality, assignments to memseg_hash array members must
6022 6023 * be done atomically.
6023 6024 *
6024 6025 * NOTE: bzero() does not currently guarantee this for kernel
6025 6026 * threads, and cannot be used here.
6026 6027 */
6027 6028 for (i = 0; i < N_MEM_SLOTS; i++)
6028 6029 memseg_hash[i] = NULL;
6029 6030
6030 6031 hat_kpm_mseghash_clear(N_MEM_SLOTS);
6031 6032
6032 6033 /*
6033 6034 * Physmax is the last valid pfn.
6034 6035 */
6035 6036 mhash_per_slot = (physmax + 1) >> MEM_HASH_SHIFT;
6036 6037 for (pseg = memsegs; pseg != NULL; pseg = pseg->next) {
6037 6038 index = MEMSEG_PFN_HASH(pseg->pages_base);
6038 6039 cur = pseg->pages_base;
6039 6040 do {
6040 6041 if (index >= N_MEM_SLOTS)
6041 6042 index = MEMSEG_PFN_HASH(cur);
6042 6043
6043 6044 if (memseg_hash[index] == NULL ||
6044 6045 memseg_hash[index]->pages_base > pseg->pages_base) {
6045 6046 memseg_hash[index] = pseg;
6046 6047 hat_kpm_mseghash_update(index, pseg);
6047 6048 }
6048 6049 cur += mhash_per_slot;
6049 6050 index++;
6050 6051 } while (cur < pseg->pages_end);
6051 6052 }
6052 6053 }
6053 6054
6054 6055 /*
6055 6056 * Return the pagenum for the pp
6056 6057 */
6057 6058 pfn_t
6058 6059 page_pptonum(page_t *pp)
6059 6060 {
6060 6061 return (pp->p_pagenum);
6061 6062 }
6062 6063
6063 6064 /*
6064 6065 * Interface to the referenced, modified, etc. bits
6065 6066 * in the PSM part of the page struct
6066 6067 * when no locking is desired.
6067 6068 */
6068 6069 void
6069 6070 page_set_props(page_t *pp, uint_t flags)
6070 6071 {
6071 6072 ASSERT((flags & ~(P_MOD | P_REF | P_RO)) == 0);
6072 6073 pp->p_nrm |= (uchar_t)flags;
6073 6074 }
6074 6075
6075 6076 void
6076 6077 page_clr_all_props(page_t *pp)
6077 6078 {
6078 6079 pp->p_nrm = 0;
6079 6080 }
6080 6081
6081 6082 /*
6082 6083 * Clear p_lckcnt and p_cowcnt, adjusting freemem if required.
6083 6084 */
6084 6085 int
6085 6086 page_clear_lck_cow(page_t *pp, int adjust)
6086 6087 {
6087 6088 int f_amount;
6088 6089
6089 6090 ASSERT(PAGE_EXCL(pp));
6090 6091
6091 6092 /*
6092 6093 * The page_struct_lock need not be acquired here since
6093 6094 * we require the caller hold the page exclusively locked.
6094 6095 */
6095 6096 f_amount = 0;
6096 6097 if (pp->p_lckcnt) {
6097 6098 f_amount = 1;
6098 6099 pp->p_lckcnt = 0;
6099 6100 }
6100 6101 if (pp->p_cowcnt) {
6101 6102 f_amount += pp->p_cowcnt;
6102 6103 pp->p_cowcnt = 0;
6103 6104 }
6104 6105
6105 6106 if (adjust && f_amount) {
6106 6107 mutex_enter(&freemem_lock);
6107 6108 availrmem += f_amount;
6108 6109 mutex_exit(&freemem_lock);
6109 6110 }
6110 6111
6111 6112 return (f_amount);
6112 6113 }
6113 6114
6114 6115 /*
6115 6116 * The following function is called from free_vp_pages()
6116 6117 * for an inexact estimate of a newly free'd page...
6117 6118 */
6118 6119 ulong_t
6119 6120 page_share_cnt(page_t *pp)
6120 6121 {
6121 6122 return (hat_page_getshare(pp));
6122 6123 }
6123 6124
6124 6125 int
6125 6126 page_isshared(page_t *pp)
6126 6127 {
6127 6128 return (hat_page_checkshare(pp, 1));
6128 6129 }
6129 6130
6130 6131 int
6131 6132 page_isfree(page_t *pp)
6132 6133 {
6133 6134 return (PP_ISFREE(pp));
6134 6135 }
6135 6136
6136 6137 int
6137 6138 page_isref(page_t *pp)
6138 6139 {
6139 6140 return (hat_page_getattr(pp, P_REF));
6140 6141 }
6141 6142
6142 6143 int
6143 6144 page_ismod(page_t *pp)
6144 6145 {
6145 6146 return (hat_page_getattr(pp, P_MOD));
6146 6147 }
6147 6148
6148 6149 /*
6149 6150 * The following code all currently relates to the page capture logic:
6150 6151 *
6151 6152 * This logic is used for cases where there is a desire to claim a certain
6152 6153 * physical page in the system for the caller. As it may not be possible
6153 6154 * to capture the page immediately, the p_toxic bits are used in the page
6154 6155 * structure to indicate that someone wants to capture this page. When the
6155 6156 * page gets unlocked, the toxic flag will be noted and an attempt to capture
6156 6157 * the page will be made. If it is successful, the original caller's callback
6157 6158 * will be called with the page to do with it what they please.
6158 6159 *
6159 6160 * There is also an async thread which occasionally wakes up to attempt to
6160 6161 * capture pages which have the capture bit set. All of the pages which
6161 6162 * need to be captured asynchronously have been inserted into the
6162 6163 * page_capture_hash and thus this thread walks that hash list. Items in the
6163 6164 * hash have an expiration time so this thread handles that as well by removing
6164 6165 * the item from the hash if it has expired.
6165 6166 *
6166 6167 * Some important things to note are:
6167 6168 * - if the PR_CAPTURE bit is set on a page, then the page is in the
6168 6169 * page_capture_hash. The page_capture_hash_head.pchh_mutex is needed
6169 6170 * to set and clear this bit, and while the lock is held is the only time
6170 6171 * you can add or remove an entry from the hash.
6171 6172 * - the PR_CAPTURE bit can only be set and cleared while holding the
6172 6173 * page_capture_hash_head.pchh_mutex
6173 6174 * - the t_flag field of the thread struct is used with the T_CAPTURING
6174 6175 * flag to prevent recursion while dealing with large pages.
6175 6176 * - pages which need to be retired never expire on the page_capture_hash.
6176 6177 */
6177 6178
6178 6179 static void page_capture_thread(void);
6179 6180 static kthread_t *pc_thread_id;
6180 6181 kcondvar_t pc_cv;
6181 6182 static kmutex_t pc_thread_mutex;
6182 6183 static clock_t pc_thread_shortwait;
6183 6184 static clock_t pc_thread_longwait;
6184 6185 static int pc_thread_retry;
6185 6186
6186 6187 struct page_capture_callback pc_cb[PC_NUM_CALLBACKS];
6187 6188
6188 6189 /* Note that this is a circular linked list */
6189 6190 typedef struct page_capture_hash_bucket {
6190 6191 page_t *pp;
6191 6192 uchar_t szc;
6192 6193 uchar_t pri;
6193 6194 uint_t flags;
6194 6195 clock_t expires; /* lbolt at which this request expires. */
6195 6196 void *datap; /* Cached data passed in for callback */
6196 6197 struct page_capture_hash_bucket *next;
6197 6198 struct page_capture_hash_bucket *prev;
6198 6199 } page_capture_hash_bucket_t;
6199 6200
6200 6201 #define PC_PRI_HI 0 /* capture now */
6201 6202 #define PC_PRI_LO 1 /* capture later */
6202 6203 #define PC_NUM_PRI 2
6203 6204
6204 6205 #define PAGE_CAPTURE_PRIO(pp) (PP_ISRAF(pp) ? PC_PRI_LO : PC_PRI_HI)
6205 6206
6206 6207
6207 6208 /*
6208 6209 * Each hash bucket will have its own mutex and two lists which are:
6209 6210 * active (0): represents requests which have not been processed by
6210 6211 * the page_capture async thread yet.
6211 6212 * walked (1): represents requests which have been processed by the
6212 6213 * page_capture async thread within its given walk of this bucket.
6213 6214 *
6214 6215 * These are all needed so that we can synchronize all async page_capture
6215 6216 * events. When the async thread moves to a new bucket, it will append the
6216 6217 * walked list to the active list and walk each item one at a time, moving it
6217 6218 * from the active list to the walked list. Thus if there is an async request
6218 6219 * outstanding for a given page, it will always be in one of the two lists.
6219 6220 * New requests will always be added to the active list.
6220 6221 * If we were not able to capture a page before the request expired, we'd free
6221 6222 * up the request structure which would indicate to page_capture that there is
6222 6223 * no longer a need for the given page, and clear the PR_CAPTURE flag if
6223 6224 * possible.
6224 6225 */
6225 6226 typedef struct page_capture_hash_head {
6226 6227 kmutex_t pchh_mutex;
6227 6228 uint_t num_pages[PC_NUM_PRI];
6228 6229 page_capture_hash_bucket_t lists[2]; /* sentinel nodes */
6229 6230 } page_capture_hash_head_t;
6230 6231
6231 6232 #ifdef DEBUG
6232 6233 #define NUM_PAGE_CAPTURE_BUCKETS 4
6233 6234 #else
6234 6235 #define NUM_PAGE_CAPTURE_BUCKETS 64
6235 6236 #endif
6236 6237
6237 6238 page_capture_hash_head_t page_capture_hash[NUM_PAGE_CAPTURE_BUCKETS];
6238 6239
6239 6240 /* for now use a very simple hash based upon the size of a page struct */
6240 6241 #define PAGE_CAPTURE_HASH(pp) \
6241 6242 ((int)(((uintptr_t)pp >> 7) & (NUM_PAGE_CAPTURE_BUCKETS - 1)))
6242 6243
6243 6244 extern pgcnt_t swapfs_minfree;
6244 6245
6245 6246 int page_trycapture(page_t *pp, uint_t szc, uint_t flags, void *datap);
6246 6247
6247 6248 /*
6248 6249 * a callback function is required for page capture requests.
6249 6250 */
6250 6251 void
6251 6252 page_capture_register_callback(uint_t index, clock_t duration,
6252 6253 int (*cb_func)(page_t *, void *, uint_t))
6253 6254 {
6254 6255 ASSERT(pc_cb[index].cb_active == 0);
6255 6256 ASSERT(cb_func != NULL);
6256 6257 rw_enter(&pc_cb[index].cb_rwlock, RW_WRITER);
6257 6258 pc_cb[index].duration = duration;
6258 6259 pc_cb[index].cb_func = cb_func;
6259 6260 pc_cb[index].cb_active = 1;
6260 6261 rw_exit(&pc_cb[index].cb_rwlock);
6261 6262 }
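
/*
 * Illustrative sketch (hypothetical, not part of this file): a page
 * capture consumer registers its callback once and then requests pages
 * with page_trycapture().  EXAMPLE_CB_INDEX, example_capture_done() and
 * example_datap are placeholders; the flag bit passed to
 * page_trycapture() must correspond to the callback index.
 */
/*ARGSUSED*/
static int
example_capture_done(page_t *pp, void *datap, uint_t flags)
{
	/*
	 * The callback receives the page SE_EXCL locked and already
	 * cleaned; it is now responsible for the page (the common
	 * failure path is simply page_free()).
	 */
	return (0);
}

static void
example_capture_setup(page_t *pp, void *example_datap)
{
	/* A duration of -1 means queued requests never expire. */
	page_capture_register_callback(EXAMPLE_CB_INDEX, -1,
	    example_capture_done);

	if (page_trycapture(pp, 0, 1 << EXAMPLE_CB_INDEX, example_datap) != 0) {
		/* Not captured now; an async request may have been queued. */
	}
}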
6262 6263
6263 6264 void
6264 6265 page_capture_unregister_callback(uint_t index)
6265 6266 {
6266 6267 int i, j;
6267 6268 struct page_capture_hash_bucket *bp1;
6268 6269 struct page_capture_hash_bucket *bp2;
6269 6270 struct page_capture_hash_bucket *head = NULL;
6270 6271 uint_t flags = (1 << index);
6271 6272
6272 6273 rw_enter(&pc_cb[index].cb_rwlock, RW_WRITER);
6273 6274 ASSERT(pc_cb[index].cb_active == 1);
6274 6275 pc_cb[index].duration = 0; /* Paranoia */
6275 6276 pc_cb[index].cb_func = NULL; /* Paranoia */
6276 6277 pc_cb[index].cb_active = 0;
6277 6278 rw_exit(&pc_cb[index].cb_rwlock);
6278 6279
6279 6280 /*
6280 6281 * Just move all the entries to a private list which we can walk
6281 6282 * through without the need to hold any locks.
6282 6283 * No more requests can get added to the hash lists for this consumer
6283 6284 * as the cb_active field for the callback has been cleared.
6284 6285 */
6285 6286 for (i = 0; i < NUM_PAGE_CAPTURE_BUCKETS; i++) {
6286 6287 mutex_enter(&page_capture_hash[i].pchh_mutex);
6287 6288 for (j = 0; j < 2; j++) {
6288 6289 bp1 = page_capture_hash[i].lists[j].next;
6289 6290 /* walk through all but first (sentinel) element */
6290 6291 while (bp1 != &page_capture_hash[i].lists[j]) {
6291 6292 bp2 = bp1;
6292 6293 if (bp2->flags & flags) {
6293 6294 bp1 = bp2->next;
6294 6295 bp1->prev = bp2->prev;
6295 6296 bp2->prev->next = bp1;
6296 6297 bp2->next = head;
6297 6298 head = bp2;
6298 6299 /*
6299 6300 * Clear the PR_CAPTURE bit as we
6300 6301 * hold appropriate locks here.
6301 6302 */
6302 6303 page_clrtoxic(head->pp, PR_CAPTURE);
6303 6304 page_capture_hash[i].
6304 6305 num_pages[bp2->pri]--;
6305 6306 continue;
6306 6307 }
6307 6308 bp1 = bp1->next;
6308 6309 }
6309 6310 }
6310 6311 mutex_exit(&page_capture_hash[i].pchh_mutex);
6311 6312 }
6312 6313
6313 6314 while (head != NULL) {
6314 6315 bp1 = head;
6315 6316 head = head->next;
6316 6317 kmem_free(bp1, sizeof (*bp1));
6317 6318 }
6318 6319 }
6319 6320
6320 6321
6321 6322 /*
6322 6323 * Find pp in the active list and move it to the walked list if it
6323 6324 * exists.
6324 6325 * Note that most often pp should be at the front of the active list
6325 6326 * as it is currently used and thus there is no other sort of optimization
6326 6327 * being done here as this is a linked list data structure.
6327 6328 * Returns 1 on successful move or 0 if page could not be found.
6328 6329 */
6329 6330 static int
6330 6331 page_capture_move_to_walked(page_t *pp)
6331 6332 {
6332 6333 page_capture_hash_bucket_t *bp;
6333 6334 int index;
6334 6335
6335 6336 index = PAGE_CAPTURE_HASH(pp);
6336 6337
6337 6338 mutex_enter(&page_capture_hash[index].pchh_mutex);
6338 6339 bp = page_capture_hash[index].lists[0].next;
6339 6340 while (bp != &page_capture_hash[index].lists[0]) {
6340 6341 if (bp->pp == pp) {
6341 6342 /* Remove from old list */
6342 6343 bp->next->prev = bp->prev;
6343 6344 bp->prev->next = bp->next;
6344 6345
6345 6346 /* Add to new list */
6346 6347 bp->next = page_capture_hash[index].lists[1].next;
6347 6348 bp->prev = &page_capture_hash[index].lists[1];
6348 6349 page_capture_hash[index].lists[1].next = bp;
6349 6350 bp->next->prev = bp;
6350 6351
6351 6352 /*
6352 6353 * There is a small probability of page on a free
6353 6354 * There is a small probability of a page on a free
6354 6355 * list being retired while it is being allocated
6355 6356 * and before P_RAF is set on it. The page may
6356 6357 * end up marked as a high priority request instead
6357 6358 * of a low priority request.
6358 6359 * If a P_RAF page is not marked as a low priority request,
6359 6360 * change it to a low priority request.
6360 6361 page_capture_hash[index].num_pages[bp->pri]--;
6361 6362 bp->pri = PAGE_CAPTURE_PRIO(pp);
6362 6363 page_capture_hash[index].num_pages[bp->pri]++;
6363 6364 mutex_exit(&page_capture_hash[index].pchh_mutex);
6364 6365 return (1);
6365 6366 }
6366 6367 bp = bp->next;
6367 6368 }
6368 6369 mutex_exit(&page_capture_hash[index].pchh_mutex);
6369 6370 return (0);
6370 6371 }
6371 6372
6372 6373 /*
6373 6374 * Add a new entry to the page capture hash. The only case where a new
6374 6375 * entry is not added is when the page capture consumer is no longer registered.
6375 6376 * In this case, we'll silently not add the page to the hash. We know that
6376 6377 * page retire will always be registered for the case where we are currently
6377 6378 * unretiring a page and thus there are no conflicts.
6378 6379 */
6379 6380 static void
6380 6381 page_capture_add_hash(page_t *pp, uint_t szc, uint_t flags, void *datap)
6381 6382 {
6382 6383 page_capture_hash_bucket_t *bp1;
6383 6384 page_capture_hash_bucket_t *bp2;
6384 6385 int index;
6385 6386 int cb_index;
6386 6387 int i;
6387 6388 uchar_t pri;
6388 6389 #ifdef DEBUG
6389 6390 page_capture_hash_bucket_t *tp1;
6390 6391 int l;
6391 6392 #endif
6392 6393
6393 6394 ASSERT(!(flags & CAPTURE_ASYNC));
6394 6395
6395 6396 bp1 = kmem_alloc(sizeof (struct page_capture_hash_bucket), KM_SLEEP);
6396 6397
6397 6398 bp1->pp = pp;
6398 6399 bp1->szc = szc;
6399 6400 bp1->flags = flags;
6400 6401 bp1->datap = datap;
6401 6402
6402 6403 for (cb_index = 0; cb_index < PC_NUM_CALLBACKS; cb_index++) {
6403 6404 if ((flags >> cb_index) & 1) {
6404 6405 break;
6405 6406 }
6406 6407 }
6407 6408
6408 6409 ASSERT(cb_index != PC_NUM_CALLBACKS);
6409 6410
6410 6411 rw_enter(&pc_cb[cb_index].cb_rwlock, RW_READER);
6411 6412 if (pc_cb[cb_index].cb_active) {
6412 6413 if (pc_cb[cb_index].duration == -1) {
6413 6414 bp1->expires = (clock_t)-1;
6414 6415 } else {
6415 6416 bp1->expires = ddi_get_lbolt() +
6416 6417 pc_cb[cb_index].duration;
6417 6418 }
6418 6419 } else {
6419 6420 /* There's no callback registered so don't add to the hash */
6420 6421 rw_exit(&pc_cb[cb_index].cb_rwlock);
6421 6422 kmem_free(bp1, sizeof (*bp1));
6422 6423 return;
6423 6424 }
6424 6425
6425 6426 index = PAGE_CAPTURE_HASH(pp);
6426 6427
6427 6428 /*
6428 6429 * Only allow capture flag to be modified under this mutex.
6429 6430 * Prevents multiple entries for same page getting added.
6430 6431 */
6431 6432 mutex_enter(&page_capture_hash[index].pchh_mutex);
6432 6433
6433 6434 /*
6434 6435 * if not already on the hash, set capture bit and add to the hash
6435 6436 */
6436 6437 if (!(pp->p_toxic & PR_CAPTURE)) {
6437 6438 #ifdef DEBUG
6438 6439 /* Check for duplicate entries */
6439 6440 for (l = 0; l < 2; l++) {
6440 6441 tp1 = page_capture_hash[index].lists[l].next;
6441 6442 while (tp1 != &page_capture_hash[index].lists[l]) {
6442 6443 if (tp1->pp == pp) {
6443 6444 panic("page pp 0x%p already on hash "
6444 6445 "at 0x%p\n",
6445 6446 (void *)pp, (void *)tp1);
6446 6447 }
6447 6448 tp1 = tp1->next;
6448 6449 }
6449 6450 }
6450 6451
6451 6452 #endif
6452 6453 page_settoxic(pp, PR_CAPTURE);
6453 6454 pri = PAGE_CAPTURE_PRIO(pp);
6454 6455 bp1->pri = pri;
6455 6456 bp1->next = page_capture_hash[index].lists[0].next;
6456 6457 bp1->prev = &page_capture_hash[index].lists[0];
6457 6458 bp1->next->prev = bp1;
6458 6459 page_capture_hash[index].lists[0].next = bp1;
6459 6460 page_capture_hash[index].num_pages[pri]++;
6460 6461 if (flags & CAPTURE_RETIRE) {
6461 6462 page_retire_incr_pend_count(datap);
6462 6463 }
6463 6464 mutex_exit(&page_capture_hash[index].pchh_mutex);
6464 6465 rw_exit(&pc_cb[cb_index].cb_rwlock);
6465 6466 cv_signal(&pc_cv);
6466 6467 return;
6467 6468 }
6468 6469
6469 6470 /*
6470 6471 * A page retire request will replace any other request.
6471 6472 * A second physmem request which is for a different process than
6472 6473 * the currently registered one will be dropped as there is
6473 6474 * no way to hold the private data for both calls.
6474 6475 * In the future, once there are more callers, this will have to
6475 6476 * be worked out better as there needs to be private storage for
6476 6477 * at least each type of caller (maybe have datap be an array of
6477 6478 * *void's so that we can index based upon callers index).
6478 6479 */
6479 6480
6480 6481 /* walk hash list to update expire time */
6481 6482 for (i = 0; i < 2; i++) {
6482 6483 bp2 = page_capture_hash[index].lists[i].next;
6483 6484 while (bp2 != &page_capture_hash[index].lists[i]) {
6484 6485 if (bp2->pp == pp) {
6485 6486 if (flags & CAPTURE_RETIRE) {
6486 6487 if (!(bp2->flags & CAPTURE_RETIRE)) {
6487 6488 page_retire_incr_pend_count(
6488 6489 datap);
6489 6490 bp2->flags = flags;
6490 6491 bp2->expires = bp1->expires;
6491 6492 bp2->datap = datap;
6492 6493 }
6493 6494 } else {
6494 6495 ASSERT(flags & CAPTURE_PHYSMEM);
6495 6496 if (!(bp2->flags & CAPTURE_RETIRE) &&
6496 6497 (datap == bp2->datap)) {
6497 6498 bp2->expires = bp1->expires;
6498 6499 }
6499 6500 }
6500 6501 mutex_exit(&page_capture_hash[index].
6501 6502 pchh_mutex);
6502 6503 rw_exit(&pc_cb[cb_index].cb_rwlock);
6503 6504 kmem_free(bp1, sizeof (*bp1));
6504 6505 return;
6505 6506 }
6506 6507 bp2 = bp2->next;
6507 6508 }
6508 6509 }
6509 6510
6510 6511 /*
6511 6512 * the PR_CAPTURE flag is protected by the page_capture_hash mutexes
6512 6513 * and thus it either has to be set or not set and can't change
6513 6514 * while holding the mutex above.
6514 6515 */
6515 6516 panic("page_capture_add_hash, PR_CAPTURE flag set on pp %p\n",
6516 6517 (void *)pp);
6517 6518 }
6518 6519
6519 6520 /*
6520 6521 * We have a page in our hands, so let's try to make it ours by turning
6521 6522 * it into a clean page like it had just come off the freelists.
6522 6523 *
6523 6524 * Returns 0 on success, with the page still EXCL locked.
6524 6525 * On failure, the page will be unlocked and EAGAIN is returned.
6525 6526 */
6526 6527 static int
6527 6528 page_capture_clean_page(page_t *pp)
6528 6529 {
6529 6530 page_t *newpp;
6530 6531 int skip_unlock = 0;
6531 6532 spgcnt_t count;
6532 6533 page_t *tpp;
6533 6534 int ret = 0;
6534 6535 int extra;
6535 6536
6536 6537 ASSERT(PAGE_EXCL(pp));
6537 6538 ASSERT(!PP_RETIRED(pp));
6538 6539 ASSERT(curthread->t_flag & T_CAPTURING);
6539 6540
6540 6541 if (PP_ISFREE(pp)) {
6541 6542 if (!page_reclaim(pp, NULL)) {
6542 6543 skip_unlock = 1;
6543 6544 ret = EAGAIN;
6544 6545 goto cleanup;
6545 6546 }
6546 6547 ASSERT(pp->p_szc == 0);
6547 6548 if (pp->p_vnode != NULL) {
6548 6549 /*
6549 6550 * Since this page came from the
6550 6551 * cachelist, we must destroy the
6551 6552 * old vnode association.
6552 6553 */
6553 6554 page_hashout(pp, NULL);
6554 6555 }
6555 6556 goto cleanup;
6556 6557 }
6557 6558
6558 6559 /*
6559 6560 * If we know page_relocate will fail, skip it
6560 6561 * It could still fail due to a UE on another page but we
6561 6562 * can't do anything about that.
6562 6563 */
6563 6564 if (pp->p_toxic & PR_UE) {
6564 6565 goto skip_relocate;
6565 6566 }
6566 6567
6567 6568 /*
6568 6569 * It's possible that pages may not have a vnode as fsflush comes
6569 6570 * through and cleans up these pages. It's ugly but that's how it is.
6570 6571 */
6571 6572 if (pp->p_vnode == NULL) {
6572 6573 goto skip_relocate;
6573 6574 }
6574 6575
6575 6576 /*
6576 6577 * Page was not free, so lets try to relocate it.
6577 6578 * page_relocate only works with root pages, so if this is not a root
6578 6579 * page, we need to demote it to try and relocate it.
6579 6580 * Unfortunately this is the best we can do right now.
6580 6581 */
6581 6582 newpp = NULL;
6582 6583 if ((pp->p_szc > 0) && (pp != PP_PAGEROOT(pp))) {
6583 6584 if (page_try_demote_pages(pp) == 0) {
6584 6585 ret = EAGAIN;
6585 6586 goto cleanup;
6586 6587 }
6587 6588 }
6588 6589 ret = page_relocate(&pp, &newpp, 1, 0, &count, NULL);
6589 6590 if (ret == 0) {
6590 6591 page_t *npp;
6591 6592 /* unlock the new page(s) */
6592 6593 while (count-- > 0) {
6593 6594 ASSERT(newpp != NULL);
6594 6595 npp = newpp;
6595 6596 page_sub(&newpp, npp);
6596 6597 page_unlock(npp);
6597 6598 }
6598 6599 ASSERT(newpp == NULL);
6599 6600 /*
6600 6601 * Check to see if the page we have is too large.
6601 6602 * If so, demote it freeing up the extra pages.
6602 6603 */
6603 6604 if (pp->p_szc > 0) {
6604 6605 /* For now demote extra pages to szc == 0 */
6605 6606 extra = page_get_pagecnt(pp->p_szc) - 1;
6606 6607 while (extra > 0) {
6607 6608 tpp = pp->p_next;
6608 6609 page_sub(&pp, tpp);
6609 6610 tpp->p_szc = 0;
6610 6611 page_free(tpp, 1);
6611 6612 extra--;
6612 6613 }
6613 6614 /* Make sure to set our page to szc 0 as well */
6614 6615 ASSERT(pp->p_next == pp && pp->p_prev == pp);
6615 6616 pp->p_szc = 0;
6616 6617 }
6617 6618 goto cleanup;
6618 6619 } else if (ret == EIO) {
6619 6620 ret = EAGAIN;
6620 6621 goto cleanup;
6621 6622 } else {
6622 6623 /*
6623 6624 * Need to reset return type as we failed to relocate the page
6624 6625 * but that does not mean that some of the next steps will not
6625 6626 * work.
6626 6627 */
6627 6628 ret = 0;
6628 6629 }
6629 6630
6630 6631 skip_relocate:
6631 6632
6632 6633 if (pp->p_szc > 0) {
6633 6634 if (page_try_demote_pages(pp) == 0) {
6634 6635 ret = EAGAIN;
6635 6636 goto cleanup;
6636 6637 }
6637 6638 }
6638 6639
6639 6640 ASSERT(pp->p_szc == 0);
6640 6641
6641 6642 if (hat_ismod(pp)) {
6642 6643 ret = EAGAIN;
6643 6644 goto cleanup;
6644 6645 }
6645 6646 if (PP_ISKAS(pp)) {
6646 6647 ret = EAGAIN;
6647 6648 goto cleanup;
6648 6649 }
6649 6650 if (pp->p_lckcnt || pp->p_cowcnt) {
6650 6651 ret = EAGAIN;
6651 6652 goto cleanup;
6652 6653 }
6653 6654
6654 6655 (void) hat_pageunload(pp, HAT_FORCE_PGUNLOAD);
6655 6656 ASSERT(!hat_page_is_mapped(pp));
6656 6657
6657 6658 if (hat_ismod(pp)) {
6658 6659 /*
6659 6660 * This is a semi-odd case as the page is now modified but not
6660 6661 * mapped as we just unloaded the mappings above.
6661 6662 */
6662 6663 ret = EAGAIN;
6663 6664 goto cleanup;
6664 6665 }
6665 6666 if (pp->p_vnode != NULL) {
6666 6667 page_hashout(pp, NULL);
6667 6668 }
6668 6669
6669 6670 /*
6670 6671 * At this point, the page should be in a clean state and
6671 6672 * we can do whatever we want with it.
6672 6673 */
6673 6674
6674 6675 cleanup:
6675 6676 if (ret != 0) {
6676 6677 if (!skip_unlock) {
6677 6678 page_unlock(pp);
6678 6679 }
6679 6680 } else {
6680 6681 ASSERT(pp->p_szc == 0);
6681 6682 ASSERT(PAGE_EXCL(pp));
6682 6683
6683 6684 pp->p_next = pp;
6684 6685 pp->p_prev = pp;
6685 6686 }
6686 6687 return (ret);
6687 6688 }
6688 6689
6689 6690 /*
6690 6691 * Various callers of page_trycapture() can have different restrictions upon
6691 6692 * what memory they have access to.
6692 6693 * Returns 0 on success, with the following error codes on failure:
6693 6694 * EPERM - The requested page is long term locked, and thus repeated
6694 6695 * requests to capture this page will likely fail.
6695 6696 * ENOMEM - There was not enough free memory in the system to safely
6696 6697 * map the requested page.
6697 6698 * ENOENT - The requested page was inside the kernel cage, and the
6698 6699 * PHYSMEM_CAGE flag was not set.
6699 6700 */
6700 6701 int
6701 6702 page_capture_pre_checks(page_t *pp, uint_t flags)
6702 6703 {
6703 6704 ASSERT(pp != NULL);
6704 6705
6705 6706 #if defined(__sparc)
6706 6707 if (pp->p_vnode == &promvp) {
6707 6708 return (EPERM);
6708 6709 }
6709 6710
6710 6711 if (PP_ISNORELOC(pp) && !(flags & CAPTURE_GET_CAGE) &&
6711 6712 (flags & CAPTURE_PHYSMEM)) {
6712 6713 return (ENOENT);
6713 6714 }
6714 6715
6715 6716 if (PP_ISNORELOCKERNEL(pp)) {
6716 6717 return (EPERM);
6717 6718 }
6718 6719 #else
6719 6720 if (PP_ISKAS(pp)) {
6720 6721 return (EPERM);
6721 6722 }
6722 6723 #endif /* __sparc */
6723 6724
6724 6725 /* only physmem currently has the restrictions checked below */
6725 6726 if (!(flags & CAPTURE_PHYSMEM)) {
6726 6727 return (0);
6727 6728 }
6728 6729
6729 6730 if (availrmem < swapfs_minfree) {
6730 6731 /*
6731 6732 * We won't try to capture this page as we are
6732 6733 * running low on memory.
6733 6734 */
6734 6735 return (ENOMEM);
6735 6736 }
6736 6737 return (0);
6737 6738 }
6738 6739
6739 6740 /*
6740 6741 * Once we have a page in our mitts, go ahead and complete the capture
6741 6742 * operation.
6742 6743 * Returns 1 on failure where page is no longer needed
6743 6744 * Returns 0 on success
6744 6745 * Returns -1 if there was a transient failure.
6745 6746 * Failure cases must release the SE_EXCL lock on pp (usually via page_free).
6746 6747 */
6747 6748 int
6748 6749 page_capture_take_action(page_t *pp, uint_t flags, void *datap)
6749 6750 {
6750 6751 int cb_index;
6751 6752 int ret = 0;
6752 6753 page_capture_hash_bucket_t *bp1;
6753 6754 page_capture_hash_bucket_t *bp2;
6754 6755 int index;
6755 6756 int found = 0;
6756 6757 int i;
6757 6758
6758 6759 ASSERT(PAGE_EXCL(pp));
6759 6760 ASSERT(curthread->t_flag & T_CAPTURING);
6760 6761
6761 6762 for (cb_index = 0; cb_index < PC_NUM_CALLBACKS; cb_index++) {
6762 6763 if ((flags >> cb_index) & 1) {
6763 6764 break;
6764 6765 }
6765 6766 }
6766 6767 ASSERT(cb_index < PC_NUM_CALLBACKS);
6767 6768
6768 6769 /*
6769 6770 * Remove the entry from the page_capture hash, but don't free it yet
6770 6771 * as we may need to put it back.
6771 6772 * Since we own the page at this point in time, we should find it
6772 6773 * in the hash if this is an ASYNC call. If we don't it's likely
6773 6774 * that the page_capture_async() thread decided that this request
6774 6775 * had expired, in which case we just continue on.
6775 6776 */
6776 6777 if (flags & CAPTURE_ASYNC) {
6777 6778
6778 6779 index = PAGE_CAPTURE_HASH(pp);
6779 6780
6780 6781 mutex_enter(&page_capture_hash[index].pchh_mutex);
6781 6782 for (i = 0; i < 2 && !found; i++) {
6782 6783 bp1 = page_capture_hash[index].lists[i].next;
6783 6784 while (bp1 != &page_capture_hash[index].lists[i]) {
6784 6785 if (bp1->pp == pp) {
6785 6786 bp1->next->prev = bp1->prev;
6786 6787 bp1->prev->next = bp1->next;
6787 6788 page_capture_hash[index].
6788 6789 num_pages[bp1->pri]--;
6789 6790 page_clrtoxic(pp, PR_CAPTURE);
6790 6791 found = 1;
6791 6792 break;
6792 6793 }
6793 6794 bp1 = bp1->next;
6794 6795 }
6795 6796 }
6796 6797 mutex_exit(&page_capture_hash[index].pchh_mutex);
6797 6798 }
6798 6799
6799 6800 /* Synchronize with the unregister func. */
6800 6801 rw_enter(&pc_cb[cb_index].cb_rwlock, RW_READER);
6801 6802 if (!pc_cb[cb_index].cb_active) {
6802 6803 page_free(pp, 1);
6803 6804 rw_exit(&pc_cb[cb_index].cb_rwlock);
6804 6805 if (found) {
6805 6806 kmem_free(bp1, sizeof (*bp1));
6806 6807 }
6807 6808 return (1);
6808 6809 }
6809 6810
6810 6811 /*
6811 6812 * We need to remove the entry from the page capture hash and turn off
6812 6813 * the PR_CAPTURE bit before calling the callback. We'll need to cache
6813 6814 * the entry here, and then based upon the return value, clean up
6814 6815 * appropriately or re-add it to the hash, making sure that someone else
6815 6816 * hasn't already done so.
6816 6817 * It should be rare for the callback to fail and thus it's ok for
6817 6818 * the failure path to be a bit complicated as the success path is
6818 6819 * cleaner and the locking rules are easier to follow.
6819 6820 */
6820 6821
6821 6822 ret = pc_cb[cb_index].cb_func(pp, datap, flags);
6822 6823
6823 6824 rw_exit(&pc_cb[cb_index].cb_rwlock);
6824 6825
6825 6826 /*
6826 6827 * If this was an ASYNC request, we need to clean up the hash if the
6827 6828 * callback was successful or if the request was no longer valid.
6828 6829 * For non-ASYNC requests, we return failure to map and the caller
6829 6830 * will take care of adding the request to the hash.
6830 6831 * Note also that the callback itself is responsible for the page
6831 6832 * at this point in time in terms of locking ... The most common
6832 6833 * case for the failure path should just be a page_free.
6833 6834 */
6834 6835 if (ret >= 0) {
6835 6836 if (found) {
6836 6837 if (bp1->flags & CAPTURE_RETIRE) {
6837 6838 page_retire_decr_pend_count(datap);
6838 6839 }
6839 6840 kmem_free(bp1, sizeof (*bp1));
6840 6841 }
6841 6842 return (ret);
6842 6843 }
6843 6844 if (!found) {
6844 6845 return (ret);
6845 6846 }
6846 6847
6847 6848 ASSERT(flags & CAPTURE_ASYNC);
6848 6849
6849 6850 /*
6850 6851 * Check for expiration time first as we can just free it up if it's
6851 6852 * expired.
6852 6853 */
6853 6854 if (ddi_get_lbolt() > bp1->expires && bp1->expires != -1) {
6854 6855 kmem_free(bp1, sizeof (*bp1));
6855 6856 return (ret);
6856 6857 }
6857 6858
6858 6859 /*
6859 6860 * The callback failed and there used to be an entry in the hash for
6860 6861 * this page, so we need to add it back to the hash.
6861 6862 */
6862 6863 mutex_enter(&page_capture_hash[index].pchh_mutex);
6863 6864 if (!(pp->p_toxic & PR_CAPTURE)) {
6864 6865 /* just add bp1 back to head of walked list */
6865 6866 page_settoxic(pp, PR_CAPTURE);
6866 6867 bp1->next = page_capture_hash[index].lists[1].next;
6867 6868 bp1->prev = &page_capture_hash[index].lists[1];
6868 6869 bp1->next->prev = bp1;
6869 6870 bp1->pri = PAGE_CAPTURE_PRIO(pp);
6870 6871 page_capture_hash[index].lists[1].next = bp1;
6871 6872 page_capture_hash[index].num_pages[bp1->pri]++;
6872 6873 mutex_exit(&page_capture_hash[index].pchh_mutex);
6873 6874 return (ret);
6874 6875 }
6875 6876
6876 6877 /*
6877 6878 * Otherwise there was a new capture request added to list
6878 6879 * Need to make sure that our original data is represented if
6879 6880 * appropriate.
6880 6881 */
6881 6882 for (i = 0; i < 2; i++) {
6882 6883 bp2 = page_capture_hash[index].lists[i].next;
6883 6884 while (bp2 != &page_capture_hash[index].lists[i]) {
6884 6885 if (bp2->pp == pp) {
6885 6886 if (bp1->flags & CAPTURE_RETIRE) {
6886 6887 if (!(bp2->flags & CAPTURE_RETIRE)) {
6887 6888 bp2->szc = bp1->szc;
6888 6889 bp2->flags = bp1->flags;
6889 6890 bp2->expires = bp1->expires;
6890 6891 bp2->datap = bp1->datap;
6891 6892 }
6892 6893 } else {
6893 6894 ASSERT(bp1->flags & CAPTURE_PHYSMEM);
6894 6895 if (!(bp2->flags & CAPTURE_RETIRE)) {
6895 6896 bp2->szc = bp1->szc;
6896 6897 bp2->flags = bp1->flags;
6897 6898 bp2->expires = bp1->expires;
6898 6899 bp2->datap = bp1->datap;
6899 6900 }
6900 6901 }
6901 6902 page_capture_hash[index].num_pages[bp2->pri]--;
6902 6903 bp2->pri = PAGE_CAPTURE_PRIO(pp);
6903 6904 page_capture_hash[index].num_pages[bp2->pri]++;
6904 6905 mutex_exit(&page_capture_hash[index].
6905 6906 pchh_mutex);
6906 6907 kmem_free(bp1, sizeof (*bp1));
6907 6908 return (ret);
6908 6909 }
6909 6910 bp2 = bp2->next;
6910 6911 }
6911 6912 }
6912 6913 panic("PR_CAPTURE set but not on hash for pp 0x%p\n", (void *)pp);
6913 6914 /*NOTREACHED*/
6914 6915 }
6915 6916
6916 6917 /*
6917 6918 * Try to capture the given page for the caller specified in the flags
6918 6919 * parameter. The page will either be captured and handed over to the
6919 6920 * appropriate callback, or will be queued up in the page capture hash
6920 6921 * to be captured asynchronously.
6921 6922 * If the current request is due to an async capture, the page must be
6922 6923 * exclusively locked before calling this function.
6923 6924 * Currently szc must be 0 but in the future this should be expandable to
6924 6925 * other page sizes.
6925 6926 * Returns 0 on success, with the following error codes on failure:
6926 6927 * EPERM - The requested page is long term locked, and thus repeated
6927 6928 * requests to capture this page will likely fail.
6928 6929 * ENOMEM - There was not enough free memory in the system to safely
6929 6930 * map the requested page.
6930 6931 * ENOENT - The requested page was inside the kernel cage, and the
6931 6932 * CAPTURE_GET_CAGE flag was not set.
6932 6933 * EAGAIN - The requested page could not be captured at this point in
6933 6934 * time but future requests will likely work.
6934 6935 * EBUSY - The requested page is retired and the CAPTURE_GET_RETIRED flag
6935 6936 * was not set.
6936 6937 */
6937 6938 int
6938 6939 page_itrycapture(page_t *pp, uint_t szc, uint_t flags, void *datap)
6939 6940 {
6940 6941 int ret;
6941 6942 int cb_index;
6942 6943
6943 6944 if (flags & CAPTURE_ASYNC) {
6944 6945 ASSERT(PAGE_EXCL(pp));
6945 6946 goto async;
6946 6947 }
6947 6948
6948 6949 /* Make sure there's enough availrmem ... */
6949 6950 ret = page_capture_pre_checks(pp, flags);
6950 6951 if (ret != 0) {
6951 6952 return (ret);
6952 6953 }
6953 6954
6954 6955 if (!page_trylock(pp, SE_EXCL)) {
6955 6956 for (cb_index = 0; cb_index < PC_NUM_CALLBACKS; cb_index++) {
6956 6957 if ((flags >> cb_index) & 1) {
6957 6958 break;
6958 6959 }
6959 6960 }
6960 6961 ASSERT(cb_index < PC_NUM_CALLBACKS);
6961 6962 ret = EAGAIN;
6962 6963 /* Special case for retired pages */
6963 6964 if (PP_RETIRED(pp)) {
6964 6965 if (flags & CAPTURE_GET_RETIRED) {
6965 6966 if (!page_unretire_pp(pp, PR_UNR_TEMP)) {
6966 6967 /*
6967 6968 * Need to set capture bit and add to
6968 6969 * hash so that the page will be
6969 6970 * retired when freed.
6970 6971 */
6971 6972 page_capture_add_hash(pp, szc,
6972 6973 CAPTURE_RETIRE, NULL);
6973 6974 ret = 0;
6974 6975 goto own_page;
6975 6976 }
6976 6977 } else {
6977 6978 return (EBUSY);
6978 6979 }
6979 6980 }
6980 6981 page_capture_add_hash(pp, szc, flags, datap);
6981 6982 return (ret);
6982 6983 }
6983 6984
6984 6985 async:
6985 6986 ASSERT(PAGE_EXCL(pp));
6986 6987
6987 6988 /* For physmem async requests, check that availrmem is sane */
6988 6989 if ((flags & (CAPTURE_ASYNC | CAPTURE_PHYSMEM)) ==
6989 6990 (CAPTURE_ASYNC | CAPTURE_PHYSMEM) &&
6990 6991 (availrmem < swapfs_minfree)) {
6991 6992 page_unlock(pp);
6992 6993 return (ENOMEM);
6993 6994 }
6994 6995
6995 6996 ret = page_capture_clean_page(pp);
6996 6997
6997 6998 if (ret != 0) {
6998 6999 /* We failed to get the page, so let's add it to the hash */
6999 7000 if (!(flags & CAPTURE_ASYNC)) {
7000 7001 page_capture_add_hash(pp, szc, flags, datap);
7001 7002 }
7002 7003 return (ret);
7003 7004 }
7004 7005
7005 7006 own_page:
7006 7007 ASSERT(PAGE_EXCL(pp));
7007 7008 ASSERT(pp->p_szc == 0);
7008 7009
7009 7010 /* Call the callback */
7010 7011 ret = page_capture_take_action(pp, flags, datap);
7011 7012
7012 7013 if (ret == 0) {
7013 7014 return (0);
7014 7015 }
7015 7016
7016 7017 /*
7017 7018 * Note that in the failure cases from page_capture_take_action, the
7018 7019 * EXCL lock will have already been dropped.
7019 7020 */
7020 7021 if ((ret == -1) && (!(flags & CAPTURE_ASYNC))) {
7021 7022 page_capture_add_hash(pp, szc, flags, datap);
7022 7023 }
7023 7024 return (EAGAIN);
7024 7025 }
7025 7026
7026 7027 int
7027 7028 page_trycapture(page_t *pp, uint_t szc, uint_t flags, void *datap)
7028 7029 {
7029 7030 int ret;
7030 7031
7031 7032 curthread->t_flag |= T_CAPTURING;
7032 7033 ret = page_itrycapture(pp, szc, flags, datap);
7033 7034 curthread->t_flag &= ~T_CAPTURING; /* clear the flag; we know it's set */
7034 7035 return (ret);
7035 7036 }
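
The return codes documented above suggest a simple caller policy: EPERM, EBUSY, and ENOENT are effectively permanent for the request as made, while EAGAIN and ENOMEM are worth retrying later. The following is a minimal, user-space sketch of that policy; stub_trycapture(), capture_with_retry(), the retry count, and the backoff are hypothetical stand-ins and are not part of vm_page.c.

/* Illustration only: a hedged, user-space model of a page_trycapture() caller. */
#include <errno.h>
#include <stdio.h>
#include <unistd.h>

/* Hypothetical stand-in for page_trycapture(); "succeeds" after one EAGAIN. */
static int
stub_trycapture(void *pp, unsigned szc, unsigned flags, void *datap)
{
	static int calls;
	(void) pp; (void) szc; (void) flags; (void) datap;
	return (calls++ == 0 ? EAGAIN : 0);
}

/* Retry transient failures; give up immediately on permanent ones. */
static int
capture_with_retry(void *pp, unsigned flags, int max_tries)
{
	int ret;

	while (max_tries-- > 0) {
		ret = stub_trycapture(pp, 0, flags, NULL);
		switch (ret) {
		case 0:		/* captured and handed to the callback */
			return (0);
		case EAGAIN:	/* transient; likely to work later */
		case ENOMEM:
			(void) usleep(1000);
			continue;
		default:	/* EPERM, EBUSY, ENOENT: retrying won't help */
			return (ret);
		}
	}
	return (EAGAIN);
}

int
main(void)
{
	int dummy_page;

	printf("capture result: %d\n", capture_with_retry(&dummy_page, 0, 3));
	return (0);
}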
7036 7037
7037 7038 /*
7038 7039 * When unlocking a page which has the PR_CAPTURE bit set, this routine
7039 7040 * gets called to try and capture the page.
7040 7041 */
7041 7042 void
7042 7043 page_unlock_capture(page_t *pp)
7043 7044 {
7044 7045 page_capture_hash_bucket_t *bp;
7045 7046 int index;
7046 7047 int i;
7047 7048 uint_t szc;
7048 7049 uint_t flags = 0;
7049 7050 void *datap;
7050 7051 kmutex_t *mp;
7051 7052 extern vnode_t retired_pages;
7052 7053
7053 7054 /*
7054 7055 * We need to protect against a possible deadlock here where we own
7055 7056 * the vnode page hash mutex and want to acquire it again as there
7056 7057 * are locations in the code, where we unlock a page while holding
7057 7058 * the mutex which can lead to the page being captured and eventually
7058 7059 * end up here. As we may be hashing out the old page and hashing into
7059 7060 * the retire vnode, we need to make sure we don't own them.
7060 7061 * Other callbacks that do hash operations also need to make sure
7061 7062 * that, before they hash in to a vnode, they do not currently own the
7062 7063 * vphm mutex; otherwise there will be a panic.
7063 7064 */
7064 7065 if (mutex_owned(page_vnode_mutex(&retired_pages))) {
7065 7066 page_unlock_nocapture(pp);
7066 7067 return;
7067 7068 }
7068 7069 if (pp->p_vnode != NULL && mutex_owned(page_vnode_mutex(pp->p_vnode))) {
7069 7070 page_unlock_nocapture(pp);
7070 7071 return;
7071 7072 }
7072 7073
7073 7074 index = PAGE_CAPTURE_HASH(pp);
7074 7075
7075 7076 mp = &page_capture_hash[index].pchh_mutex;
7076 7077 mutex_enter(mp);
7077 7078 for (i = 0; i < 2; i++) {
7078 7079 bp = page_capture_hash[index].lists[i].next;
7079 7080 while (bp != &page_capture_hash[index].lists[i]) {
7080 7081 if (bp->pp == pp) {
7081 7082 szc = bp->szc;
7082 7083 flags = bp->flags | CAPTURE_ASYNC;
7083 7084 datap = bp->datap;
7084 7085 mutex_exit(mp);
7085 7086 (void) page_trycapture(pp, szc, flags, datap);
7086 7087 return;
7087 7088 }
7088 7089 bp = bp->next;
7089 7090 }
7090 7091 }
7091 7092
7092 7093 /* Failed to find page in hash so clear flags and unlock it. */
7093 7094 page_clrtoxic(pp, PR_CAPTURE);
7094 7095 page_unlock(pp);
7095 7096
7096 7097 mutex_exit(mp);
7097 7098 }
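
The mutex_owned() checks at the top of page_unlock_capture() exist because the capture path may need the same non-recursive vnode page hash mutex the caller already holds. A rough user-space analogue of that "never re-enter a lock you may already own" discipline is sketched below using a POSIX error-checking mutex, whose EDEADLK return plays the role of mutex_owned(); hash_lock and unlock_maybe_capture() are hypothetical names.

/* Illustration only: bail out of a slow path if we already hold its lock. */
#include <pthread.h>
#include <errno.h>
#include <stdio.h>

static pthread_mutex_t hash_lock;

static void
unlock_maybe_capture(void)
{
	int err = pthread_mutex_lock(&hash_lock);

	if (err == EDEADLK) {
		/* Analogue of mutex_owned(): we already hold it; skip the slow path. */
		printf("skip capture: caller already owns hash_lock\n");
		return;
	}
	printf("capture path ran under hash_lock\n");
	pthread_mutex_unlock(&hash_lock);
}

int
main(void)
{
	pthread_mutexattr_t attr;

	pthread_mutexattr_init(&attr);
	/* ERRORCHECK mutexes report EDEADLK instead of hanging on re-entry. */
	pthread_mutexattr_settype(&attr, PTHREAD_MUTEX_ERRORCHECK);
	pthread_mutex_init(&hash_lock, &attr);

	pthread_mutex_lock(&hash_lock);
	unlock_maybe_capture();		/* caller holds the lock: skip */
	pthread_mutex_unlock(&hash_lock);

	unlock_maybe_capture();		/* lock free: capture path runs */
	return (0);
}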
7098 7099
7099 7100 void
7100 7101 page_capture_init()
7101 7102 {
7102 7103 int i;
7103 7104 for (i = 0; i < NUM_PAGE_CAPTURE_BUCKETS; i++) {
7104 7105 page_capture_hash[i].lists[0].next =
7105 7106 &page_capture_hash[i].lists[0];
7106 7107 page_capture_hash[i].lists[0].prev =
7107 7108 &page_capture_hash[i].lists[0];
7108 7109 page_capture_hash[i].lists[1].next =
7109 7110 &page_capture_hash[i].lists[1];
7110 7111 page_capture_hash[i].lists[1].prev =
7111 7112 &page_capture_hash[i].lists[1];
7112 7113 }
7113 7114
7114 7115 pc_thread_shortwait = 23 * hz;
7115 7116 pc_thread_longwait = 1201 * hz;
7116 7117 pc_thread_retry = 3;
7117 7118 mutex_init(&pc_thread_mutex, NULL, MUTEX_DEFAULT, NULL);
7118 7119 cv_init(&pc_cv, NULL, CV_DEFAULT, NULL);
7119 7120 pc_thread_id = thread_create(NULL, 0, page_capture_thread, NULL, 0, &p0,
7120 7121 TS_RUN, minclsyspri);
7121 7122 }
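
page_capture_init() points each list head's next and prev at itself, the usual empty state for a sentinel-based circular doubly-linked list, and the failure path earlier re-queues an entry by inserting it at the head of the walked list with the same four pointer stores. A stripped-down user-space model of that structure (hypothetical names, no locking) follows.

/* Illustration only: sentinel-based circular doubly-linked list. */
#include <stdio.h>

typedef struct node {
	struct node *next;
	struct node *prev;
	int val;
} node_t;

/* Empty list: the sentinel points at itself in both directions. */
static void
list_init(node_t *sentinel)
{
	sentinel->next = sentinel;
	sentinel->prev = sentinel;
}

/* Insert at the head, as the capture code does when re-queueing an entry. */
static void
list_insert_head(node_t *sentinel, node_t *np)
{
	np->next = sentinel->next;
	np->prev = sentinel;
	np->next->prev = np;
	sentinel->next = np;
}

int
main(void)
{
	node_t head, a = { .val = 1 }, b = { .val = 2 };
	node_t *np;

	list_init(&head);
	list_insert_head(&head, &a);
	list_insert_head(&head, &b);

	/* Walk until we come back around to the sentinel. */
	for (np = head.next; np != &head; np = np->next)
		printf("%d\n", np->val);	/* prints 2 then 1 */
	return (0);
}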
7122 7123
7123 7124 /*
7124 7125 * It is necessary to scrub any failing pages prior to reboot in order to
7125 7126 * prevent a latent error trap from occurring on the next boot.
7126 7127 */
7127 7128 void
7128 7129 page_retire_mdboot()
7129 7130 {
7130 7131 page_t *pp;
7131 7132 int i, j;
7132 7133 page_capture_hash_bucket_t *bp;
7133 7134 uchar_t pri;
7134 7135
7135 7136 /* walk lists looking for pages to scrub */
7136 7137 for (i = 0; i < NUM_PAGE_CAPTURE_BUCKETS; i++) {
7137 7138 for (pri = 0; pri < PC_NUM_PRI; pri++) {
7138 7139 if (page_capture_hash[i].num_pages[pri] != 0) {
7139 7140 break;
7140 7141 }
7141 7142 }
7142 7143 if (pri == PC_NUM_PRI)
7143 7144 continue;
7144 7145
7145 7146 mutex_enter(&page_capture_hash[i].pchh_mutex);
7146 7147
7147 7148 for (j = 0; j < 2; j++) {
7148 7149 bp = page_capture_hash[i].lists[j].next;
7149 7150 while (bp != &page_capture_hash[i].lists[j]) {
7150 7151 pp = bp->pp;
7151 7152 if (PP_TOXIC(pp)) {
7152 7153 if (page_trylock(pp, SE_EXCL)) {
7153 7154 PP_CLRFREE(pp);
7154 7155 pagescrub(pp, 0, PAGESIZE);
7155 7156 page_unlock(pp);
7156 7157 }
7157 7158 }
7158 7159 bp = bp->next;
7159 7160 }
7160 7161 }
7161 7162 mutex_exit(&page_capture_hash[i].pchh_mutex);
7162 7163 }
7163 7164 }
7164 7165
7165 7166 /*
7166 7167 * Walk the page_capture_hash trying to capture pages and also clean up old
7167 7168 * entries which have expired.
7168 7169 */
7169 7170 void
7170 7171 page_capture_async()
7171 7172 {
7172 7173 page_t *pp;
7173 7174 int i;
7174 7175 int ret;
7175 7176 page_capture_hash_bucket_t *bp1, *bp2;
7176 7177 uint_t szc;
7177 7178 uint_t flags;
7178 7179 void *datap;
7179 7180 uchar_t pri;
7180 7181
7181 7182 /* If there are outstanding pages to be captured, get to work */
7182 7183 for (i = 0; i < NUM_PAGE_CAPTURE_BUCKETS; i++) {
7183 7184 for (pri = 0; pri < PC_NUM_PRI; pri++) {
7184 7185 if (page_capture_hash[i].num_pages[pri] != 0)
7185 7186 break;
7186 7187 }
7187 7188 if (pri == PC_NUM_PRI)
7188 7189 continue;
7189 7190
7190 7191 /* Append list 1 to list 0 and then walk through list 0 */
7191 7192 mutex_enter(&page_capture_hash[i].pchh_mutex);
7192 7193 bp1 = &page_capture_hash[i].lists[1];
7193 7194 bp2 = bp1->next;
7194 7195 if (bp1 != bp2) {
7195 7196 bp1->prev->next = page_capture_hash[i].lists[0].next;
7196 7197 bp2->prev = &page_capture_hash[i].lists[0];
7197 7198 page_capture_hash[i].lists[0].next->prev = bp1->prev;
7198 7199 page_capture_hash[i].lists[0].next = bp2;
7199 7200 bp1->next = bp1;
7200 7201 bp1->prev = bp1;
7201 7202 }
7202 7203
7203 7204 /* lists[1] will be empty now */
7204 7205
7205 7206 bp1 = page_capture_hash[i].lists[0].next;
7206 7207 while (bp1 != &page_capture_hash[i].lists[0]) {
7207 7208 /* Check expiration time */
7208 7209 if ((ddi_get_lbolt() > bp1->expires &&
7209 7210 bp1->expires != -1) ||
7210 7211 page_deleted(bp1->pp)) {
7211 7212 page_capture_hash[i].lists[0].next = bp1->next;
7212 7213 bp1->next->prev =
7213 7214 &page_capture_hash[i].lists[0];
7214 7215 page_capture_hash[i].num_pages[bp1->pri]--;
7215 7216
7216 7217 /*
7217 7218 * We can safely remove the PR_CAPTURE bit
7218 7219 * without holding the EXCL lock on the page
7219 7220 * as the PR_CAPTURE bit requires that the
7220 7221 * page_capture_hash[].pchh_mutex be held
7221 7222 * to modify it.
7222 7223 */
7223 7224 page_clrtoxic(bp1->pp, PR_CAPTURE);
7224 7225 mutex_exit(&page_capture_hash[i].pchh_mutex);
7225 7226 kmem_free(bp1, sizeof (*bp1));
7226 7227 mutex_enter(&page_capture_hash[i].pchh_mutex);
7227 7228 bp1 = page_capture_hash[i].lists[0].next;
7228 7229 continue;
7229 7230 }
7230 7231 pp = bp1->pp;
7231 7232 szc = bp1->szc;
7232 7233 flags = bp1->flags;
7233 7234 datap = bp1->datap;
7234 7235 mutex_exit(&page_capture_hash[i].pchh_mutex);
7235 7236 if (page_trylock(pp, SE_EXCL)) {
7236 7237 ret = page_trycapture(pp, szc,
7237 7238 flags | CAPTURE_ASYNC, datap);
7238 7239 } else {
7239 7240 ret = 1; /* move to walked hash */
7240 7241 }
7241 7242
7242 7243 if (ret != 0) {
7243 7244 /* Move to walked hash */
7244 7245 (void) page_capture_move_to_walked(pp);
7245 7246 }
7246 7247 mutex_enter(&page_capture_hash[i].pchh_mutex);
7247 7248 bp1 = page_capture_hash[i].lists[0].next;
7248 7249 }
7249 7250
7250 7251 mutex_exit(&page_capture_hash[i].pchh_mutex);
7251 7252 }
7252 7253 }
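
The append of lists[1] onto lists[0] at the top of page_capture_async() is a constant-time splice of one circular sentinel list onto the front of another, after which lists[1] is reset to empty. Below is a self-contained, user-space sketch of the same pointer surgery under hypothetical names (list_splice, l0, l1); the ordering of the four stores matches the kernel code, with the destination head pointer rewritten last.

/* Illustration only: splice circular list src onto dst in O(1), then empty src. */
#include <stdio.h>

typedef struct node { struct node *next, *prev; int val; } node_t;

static void
list_init(node_t *s) { s->next = s->prev = s; }

static void
list_add(node_t *s, node_t *n)
{
	n->next = s->next; n->prev = s;
	n->next->prev = n; s->next = n;
}

static void
list_splice(node_t *dst, node_t *src)
{
	node_t *first = src->next;

	if (first == src)
		return;				/* src is already empty */
	src->prev->next = dst->next;		/* src tail -> old dst head */
	first->prev = dst;			/* src head now follows dst sentinel */
	dst->next->prev = src->prev;		/* old dst head points back at src tail */
	dst->next = first;
	src->next = src;			/* reset src to empty */
	src->prev = src;
}

int
main(void)
{
	node_t l0, l1, a = { .val = 1 }, b = { .val = 2 }, c = { .val = 3 };
	node_t *np;

	list_init(&l0); list_init(&l1);
	list_add(&l0, &a);			/* l0: 1 */
	list_add(&l1, &b); list_add(&l1, &c);	/* l1: 3, 2 */
	list_splice(&l0, &l1);			/* l0: 3, 2, 1; l1 empty */
	for (np = l0.next; np != &l0; np = np->next)
		printf("%d\n", np->val);
	return (0);
}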
7253 7254
7254 7255 /*
7255 7256 * This function is called by the page_capture_thread, and is needed
7256 7257 * in order to initiate aio cleanup, so that pages used in aio
7257 7258 * will be unlocked and subsequently retired by page_capture_thread.
7258 7259 */
7259 7260 static int
7260 7261 do_aio_cleanup(void)
7261 7262 {
7262 7263 proc_t *procp;
7263 7264 int (*aio_cleanup_dr_delete_memory)(proc_t *);
7264 7265 int cleaned = 0;
7265 7266
7266 7267 if (modload("sys", "kaio") == -1) {
7267 7268 cmn_err(CE_WARN, "do_aio_cleanup: cannot load kaio");
7268 7269 return (0);
7269 7270 }
7270 7271 /*
7271 7272 * We use the aio_cleanup_dr_delete_memory function to
7272 7273 * initiate the actual clean up; this function will wake
7273 7274 * up the per-process aio_cleanup_thread.
7274 7275 */
7275 7276 aio_cleanup_dr_delete_memory = (int (*)(proc_t *))
7276 7277 modgetsymvalue("aio_cleanup_dr_delete_memory", 0);
7277 7278 if (aio_cleanup_dr_delete_memory == NULL) {
7278 7279 cmn_err(CE_WARN,
7279 7280 "aio_cleanup_dr_delete_memory not found in kaio");
7280 7281 return (0);
7281 7282 }
7282 7283 mutex_enter(&pidlock);
7283 7284 for (procp = practive; (procp != NULL); procp = procp->p_next) {
7284 7285 mutex_enter(&procp->p_lock);
7285 7286 if (procp->p_aio != NULL) {
7286 7287 /* cleanup proc's outstanding kaio */
7287 7288 cleaned += (*aio_cleanup_dr_delete_memory)(procp);
7288 7289 }
7289 7290 mutex_exit(&procp->p_lock);
7290 7291 }
7291 7292 mutex_exit(&pidlock);
7292 7293 return (cleaned);
7293 7294 }
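
do_aio_cleanup() resolves aio_cleanup_dr_delete_memory at run time with modload()/modgetsymvalue() so that vm_page.c need not link against kaio, and it bails out if the symbol cannot be found. The user-space counterpart of that late-binding shape is dlopen()/dlsym(); a minimal sketch follows, in which strlen is only a hypothetical stand-in for the kaio symbol.

/* Illustration only: resolve a symbol at run time, as modgetsymvalue() does. */
#include <dlfcn.h>
#include <stdio.h>
#include <string.h>

int
main(void)
{
	void *hdl;
	size_t (*fn)(const char *);

	/* NULL yields a handle covering objects already loaded into the program. */
	hdl = dlopen(NULL, RTLD_LAZY);
	if (hdl == NULL) {
		fprintf(stderr, "dlopen failed: %s\n", dlerror());
		return (1);
	}
	/* strlen stands in for aio_cleanup_dr_delete_memory here. */
	fn = (size_t (*)(const char *))dlsym(hdl, "strlen");
	if (fn == NULL) {
		fprintf(stderr, "symbol not found: %s\n", dlerror());
		(void) dlclose(hdl);
		return (1);
	}
	printf("strlen(\"capture\") = %zu\n", fn("capture"));
	(void) dlclose(hdl);
	return (0);
}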
7294 7295
7295 7296 /*
7296 7297 * helper function for page_capture_thread
7297 7298 */
7298 7299 static void
7299 7300 page_capture_handle_outstanding(void)
7300 7301 {
7301 7302 int ntry;
7302 7303
7303 7304 /* Reap pages before attempting to capture pages */
7304 7305 kmem_reap();
7305 7306
7306 7307 if ((page_retire_pend_count() > page_retire_pend_kas_count()) &&
7307 7308 hat_supported(HAT_DYNAMIC_ISM_UNMAP, (void *)0)) {
7308 7309 /*
7309 7310 * Note: Purging only for platforms that support
7310 7311 * ISM hat_pageunload() - mainly SPARC. On x86/x64
7311 7312 * platforms ISM pages are SE_SHARED locked until destroyed.
7312 7313 */
7313 7314
7314 7315 /* disable and purge seg_pcache */
7315 7316 (void) seg_p_disable();
7316 7317 for (ntry = 0; ntry < pc_thread_retry; ntry++) {
7317 7318 if (!page_retire_pend_count())
7318 7319 break;
7319 7320 if (do_aio_cleanup()) {
7320 7321 /*
7321 7322 * allow the apps' cleanup threads
7322 7323 * to run
7323 7324 */
7324 7325 delay(pc_thread_shortwait);
7325 7326 }
7326 7327 page_capture_async();
7327 7328 }
7328 7329 /* reenable seg_pcache */
7329 7330 seg_p_enable();
7330 7331
7331 7332 /* completed what can be done. break out */
7332 7333 return;
7333 7334 }
7334 7335
7335 7336 /*
7336 7337 * For kernel pages and/or unsupported HAT_DYNAMIC_ISM_UNMAP, reap
7337 7338 * and then attempt to capture.
7338 7339 */
7339 7340 seg_preap();
7340 7341 page_capture_async();
7341 7342 }
7342 7343
7343 7344 /*
7344 7345 * The page_capture_thread loops forever, looking to see if there are
7345 7346 * pages still waiting to be captured.
7346 7347 */
7347 7348 static void
7348 7349 page_capture_thread(void)
7349 7350 {
7350 7351 callb_cpr_t c;
7351 7352 int i;
7352 7353 int high_pri_pages;
7353 7354 int low_pri_pages;
7354 7355 clock_t timeout;
7355 7356
7356 7357 CALLB_CPR_INIT(&c, &pc_thread_mutex, callb_generic_cpr, "page_capture");
7357 7358
7358 7359 mutex_enter(&pc_thread_mutex);
7359 7360 for (;;) {
7360 7361 high_pri_pages = 0;
7361 7362 low_pri_pages = 0;
7362 7363 for (i = 0; i < NUM_PAGE_CAPTURE_BUCKETS; i++) {
7363 7364 high_pri_pages +=
7364 7365 page_capture_hash[i].num_pages[PC_PRI_HI];
7365 7366 low_pri_pages +=
7366 7367 page_capture_hash[i].num_pages[PC_PRI_LO];
7367 7368 }
7368 7369
7369 7370 timeout = pc_thread_longwait;
7370 7371 if (high_pri_pages != 0) {
7371 7372 timeout = pc_thread_shortwait;
7372 7373 page_capture_handle_outstanding();
7373 7374 } else if (low_pri_pages != 0) {
7374 7375 page_capture_async();
7375 7376 }
7376 7377 CALLB_CPR_SAFE_BEGIN(&c);
7377 7378 (void) cv_reltimedwait(&pc_cv, &pc_thread_mutex,
7378 7379 timeout, TR_CLOCK_TICK);
7379 7380 CALLB_CPR_SAFE_END(&c, &pc_thread_mutex);
7380 7381 }
7381 7382 /*NOTREACHED*/
7382 7383 }
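
page_capture_thread() sleeps on pc_cv with the short timeout while high-priority work is pending and the long one otherwise, so it only polls aggressively when it has to, and a cv_signal can still wake it early. A user-space skeleton of that adaptive-timeout wait, using pthread_cond_timedwait() and hypothetical work counters, is sketched below.

/* Illustration only: a worker that shortens its sleep while urgent work exists. */
#include <pthread.h>
#include <stdatomic.h>
#include <time.h>
#include <stdio.h>

static pthread_mutex_t mtx = PTHREAD_MUTEX_INITIALIZER;
static pthread_cond_t cv = PTHREAD_COND_INITIALIZER;
static atomic_int high_pri_pending;	/* hypothetical work counters */
static atomic_int low_pri_pending;

static void
worker_once(void)
{
	struct timespec ts;
	int high = atomic_load(&high_pri_pending);
	int low = atomic_load(&low_pri_pending);

	if (high)
		printf("handling high-priority captures\n");
	else if (low)
		printf("handling low-priority captures\n");

	clock_gettime(CLOCK_REALTIME, &ts);
	ts.tv_sec += high ? 1 : 20;	/* short wait only when urgent work exists */

	pthread_mutex_lock(&mtx);
	(void) pthread_cond_timedwait(&cv, &mtx, &ts);	/* wakes early if signalled */
	pthread_mutex_unlock(&mtx);
}

int
main(void)
{
	atomic_store(&high_pri_pending, 1);
	worker_once();		/* sleeps at most about one second, or until signalled */
	return (0);
}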
7383 7384 /*
7384 7385 * Attempt to locate a bucket that has enough pages to satisfy the request.
7385 7386 * The initial check is done without the lock to avoid unneeded contention.
7386 7387 * The function returns 1 if enough pages were found, else 0 if it could not
7387 7388 * find enough pages in a bucket.
7388 7389 */
7389 7390 static int
7390 7391 pcf_decrement_bucket(pgcnt_t npages)
7391 7392 {
7392 7393 struct pcf *p;
7393 7394 struct pcf *q;
7394 7395 int i;
7395 7396
7396 7397 p = &pcf[PCF_INDEX()];
7397 7398 q = &pcf[pcf_fanout];
7398 7399 for (i = 0; i < pcf_fanout; i++) {
7399 7400 if (p->pcf_count > npages) {
7400 7401 /*
7401 7402 * a good one to try.
7402 7403 */
7403 7404 mutex_enter(&p->pcf_lock);
7404 7405 if (p->pcf_count > npages) {
7405 7406 p->pcf_count -= (uint_t)npages;
7406 7407 /*
7407 7408 * freemem is not protected by any lock.
7408 7409 * Thus, we cannot have any assertion
7409 7410 * containing freemem here.
7410 7411 */
7411 7412 freemem -= npages;
7412 7413 mutex_exit(&p->pcf_lock);
7413 7414 return (1);
7414 7415 }
7415 7416 mutex_exit(&p->pcf_lock);
7416 7417 }
7417 7418 p++;
7418 7419 if (p >= q) {
7419 7420 p = pcf;
7420 7421 }
7421 7422 }
7422 7423 return (0);
7423 7424 }
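
pcf_decrement_bucket() peeks at pcf_count without the lock and only takes pcf_lock (and re-checks under it) when the unlocked peek looked promising, starting from a per-CPU-style index to spread contention across the fanout. A compact user-space model of that check/lock/re-check scan over a small array of counters follows; the fanout size, the starting index, and all names are hypothetical.

/* Illustration only: unlocked peek, then locked re-check, across fanned counters. */
#include <pthread.h>
#include <stdio.h>

#define	FANOUT	4

static struct bucket {
	pthread_mutex_t lock;
	unsigned long count;
} buckets[FANOUT] = {
	{ PTHREAD_MUTEX_INITIALIZER, 10 },
	{ PTHREAD_MUTEX_INITIALIZER, 3 },
	{ PTHREAD_MUTEX_INITIALIZER, 50 },
	{ PTHREAD_MUTEX_INITIALIZER, 0 },
};

/* Try to take npages from a single bucket; 1 on success, 0 otherwise. */
static int
decrement_bucket(unsigned long npages, int start)
{
	int i, idx;

	for (i = 0; i < FANOUT; i++) {
		idx = (start + i) % FANOUT;
		if (buckets[idx].count > npages) {	/* unlocked peek */
			pthread_mutex_lock(&buckets[idx].lock);
			if (buckets[idx].count > npages) {	/* re-check under lock */
				buckets[idx].count -= npages;
				pthread_mutex_unlock(&buckets[idx].lock);
				return (1);
			}
			pthread_mutex_unlock(&buckets[idx].lock);
		}
	}
	return (0);
}

int
main(void)
{
	printf("got 20 pages: %d\n", decrement_bucket(20, 1));	/* bucket 2 has enough */
	printf("got 100 pages: %d\n", decrement_bucket(100, 0));	/* no single bucket can */
	return (0);
}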
7424 7425
7425 7426 /*
7426 7427 * Arguments:
7427 7428 * pcftotal_ret: If the value is not NULL and we have walked all the
7428 7429 * buckets but did not find enough pages then it will
7429 7430 * be set to the total number of pages in all the pcf
7430 7431 * buckets.
7431 7432 * npages: Is the number of pages we have been requested to
7432 7433 * find.
7433 7434 * unlock: If set to 0 we will leave the buckets locked if the
7434 7435 * requested number of pages is not found.
7435 7436 *
7436 7437 * Go and try to satisfy the page request from any number of buckets.
7437 7438 * This can be a very expensive operation as we have to lock the buckets
7438 7439 * we are checking (and keep them locked), starting at bucket 0.
7439 7440 *
7440 7441 * The function returns 1 if enough pages were found, else 0 if it could not
7441 7442 * find enough pages in the buckets.
7442 7443 *
7443 7444 */
7444 7445 static int
7445 7446 pcf_decrement_multiple(pgcnt_t *pcftotal_ret, pgcnt_t npages, int unlock)
7446 7447 {
7447 7448 struct pcf *p;
7448 7449 pgcnt_t pcftotal;
7449 7450 int i;
7450 7451
7451 7452 p = pcf;
7452 7453 /* try to collect pages from several pcf bins */
7453 7454 for (pcftotal = 0, i = 0; i < pcf_fanout; i++) {
7454 7455 mutex_enter(&p->pcf_lock);
7455 7456 pcftotal += p->pcf_count;
7456 7457 if (pcftotal >= npages) {
7457 7458 /*
7458 7459 * Wow! There are enough pages lying around
7459 7460 * to satisfy the request. Do the accounting,
7460 7461 * drop the locks we acquired, and go back.
7461 7462 *
7462 7463 * freemem is not protected by any lock. So,
7463 7464 * we cannot have any assertion containing
7464 7465 * freemem.
7465 7466 */
7466 7467 freemem -= npages;
7467 7468 while (p >= pcf) {
7468 7469 if (p->pcf_count <= npages) {
7469 7470 npages -= p->pcf_count;
7470 7471 p->pcf_count = 0;
7471 7472 } else {
7472 7473 p->pcf_count -= (uint_t)npages;
7473 7474 npages = 0;
7474 7475 }
7475 7476 mutex_exit(&p->pcf_lock);
7476 7477 p--;
7477 7478 }
7478 7479 ASSERT(npages == 0);
7479 7480 return (1);
7480 7481 }
7481 7482 p++;
7482 7483 }
7483 7484 if (unlock) {
7484 7485 /* failed to collect pages - release the locks */
7485 7486 while (--p >= pcf) {
7486 7487 mutex_exit(&p->pcf_lock);
7487 7488 }
7488 7489 }
7489 7490 if (pcftotal_ret != NULL)
7490 7491 *pcftotal_ret = pcftotal;
7491 7492 return (0);
7492 7493 }
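
pcf_decrement_multiple() accumulates counts bucket by bucket while keeping every visited lock held, and only once the running total covers the request does it walk back, charging each bucket and dropping its lock; if it never finds enough it either releases everything or, when unlock is 0, returns with the locks still held for the caller's slow path. A user-space sketch of the "accumulate forward, settle backward" part follows, reusing the same hypothetical bucket shape as the previous example and always releasing on failure.

/* Illustration only: take npages spread across several locked buckets. */
#include <pthread.h>
#include <stdio.h>

#define	FANOUT	4

static struct bucket {
	pthread_mutex_t lock;
	unsigned long count;
} buckets[FANOUT] = {
	{ PTHREAD_MUTEX_INITIALIZER, 5 },
	{ PTHREAD_MUTEX_INITIALIZER, 7 },
	{ PTHREAD_MUTEX_INITIALIZER, 2 },
	{ PTHREAD_MUTEX_INITIALIZER, 9 },
};

static int
decrement_multiple(unsigned long npages)
{
	unsigned long total = 0;
	int i;

	for (i = 0; i < FANOUT; i++) {
		pthread_mutex_lock(&buckets[i].lock);	/* keep every visited lock held */
		total += buckets[i].count;
		if (total >= npages) {
			/* Enough found: charge buckets walking back, dropping locks. */
			for (; i >= 0; i--) {
				if (buckets[i].count <= npages) {
					npages -= buckets[i].count;
					buckets[i].count = 0;
				} else {
					buckets[i].count -= npages;
					npages = 0;
				}
				pthread_mutex_unlock(&buckets[i].lock);
			}
			return (1);
		}
	}
	/* Not enough anywhere: release everything (the kernel may also keep the locks). */
	for (i = FANOUT - 1; i >= 0; i--)
		pthread_mutex_unlock(&buckets[i].lock);
	return (0);
}

int
main(void)
{
	printf("got 10 pages: %d\n", decrement_multiple(10));
	printf("got 100 pages: %d\n", decrement_multiple(100));
	return (0);
}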
|
↓ open down ↓ |
7396 lines elided |
↑ open up ↑ |