XXXX vm_swap: swap_phys_alloc() and swap_newphysname() should report when no swap devices are configured (SE_NODEV)
--- old/usr/src/uts/common/vm/vm_swap.c
+++ new/usr/src/uts/common/vm/vm_swap.c
1 1 /*
2 2 * CDDL HEADER START
3 3 *
4 4 * The contents of this file are subject to the terms of the
5 5 * Common Development and Distribution License (the "License").
6 6 * You may not use this file except in compliance with the License.
7 7 *
8 8 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
9 9 * or http://www.opensolaris.org/os/licensing.
10 10 * See the License for the specific language governing permissions
11 11 * and limitations under the License.
12 12 *
13 13 * When distributing Covered Code, include this CDDL HEADER in each
14 14 * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
15 15 * If applicable, add the following below this CDDL HEADER, with the
16 16 * fields enclosed by brackets "[]" replaced with your own identifying
17 17 * information: Portions Copyright [yyyy] [name of copyright owner]
18 18 *
19 19 * CDDL HEADER END
20 20 */
21 21 /*
22 22 * Copyright (c) 1987, 2010, Oracle and/or its affiliates. All rights reserved.
23 + * Copyright 2015, OmniTI Computer Consulting, Inc. All rights reserved.
23 24 */
24 25
25 26 /* Copyright (c) 1984, 1986, 1987, 1988, 1989 AT&T */
26 27 /* All Rights Reserved */
27 28
28 29 /*
29 30 * University Copyright- Copyright (c) 1982, 1986, 1988
30 31 * The Regents of the University of California
31 32 * All Rights Reserved
32 33 *
33 34 * University Acknowledgment- Portions of this document are derived from
34 35 * software developed by the University of California, Berkeley, and its
35 36 * contributors.
36 37 */
37 38
38 39 /*
39 40 * Each physical swap area has an associated bitmap representing
40 41 * its physical storage. The bitmap records which swap slots are
41 42 * currently allocated or freed. Allocation is done by searching
42 43 * through the bitmap for the first free slot. Thus, there's
43 44 * no linear relation between offset within the swap device and the
44 45 * address (within its segment(s)) of the page that the slot backs;
45 46 * instead, it's an arbitrary one-to-one mapping.
46 47 *
47 48 * Associated with each swap area is a swapinfo structure. These
48 49 * structures are linked into a linear list that determines the
49 50 * ordering of swap areas in the logical swap device. Each contains a
50 51 * pointer to the corresponding bitmap, the area's size, and its
51 52 * associated vnode.
52 53 */
53 54
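For orientation, the swapinfo fields this file relies on look roughly like the sketch below. This is illustrative only; the authoritative definition (with additional statistics and name fields) lives in <sys/fs/swapnode.h>.

struct swapinfo {			/* sketch; see <sys/fs/swapnode.h> */
	struct vnode	*si_vp;		/* vnode of this swap area */
	u_offset_t	si_soff;	/* starting byte offset on si_vp */
	u_offset_t	si_eoff;	/* ending byte offset on si_vp */
	uint_t		*si_swapslots;	/* bitmap, one bit per page slot */
	size_t		si_mapsize;	/* size of the bitmap in bytes */
	pgcnt_t		si_npgs;	/* total page slots */
	pgcnt_t		si_nfpgs;	/* currently free page slots */
	size_t		si_hint;	/* slot at which to start next search */
	ushort_t	si_flags;	/* ST_INDEL, ST_DOINGDEL */
	struct swapinfo	*si_next;	/* linear list of swap areas */
};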
54 55 #include <sys/types.h>
55 56 #include <sys/inttypes.h>
56 57 #include <sys/param.h>
57 58 #include <sys/t_lock.h>
58 59 #include <sys/sysmacros.h>
59 60 #include <sys/systm.h>
60 61 #include <sys/errno.h>
61 62 #include <sys/kmem.h>
62 63 #include <sys/vfs.h>
63 64 #include <sys/vnode.h>
64 65 #include <sys/pathname.h>
65 66 #include <sys/cmn_err.h>
66 67 #include <sys/vtrace.h>
67 68 #include <sys/swap.h>
68 69 #include <sys/dumphdr.h>
69 70 #include <sys/debug.h>
70 71 #include <sys/fs/snode.h>
71 72 #include <sys/fs/swapnode.h>
72 73 #include <sys/policy.h>
73 74 #include <sys/zone.h>
74 75
75 76 #include <vm/as.h>
76 77 #include <vm/seg.h>
77 78 #include <vm/page.h>
78 79 #include <vm/seg_vn.h>
79 80 #include <vm/hat.h>
80 81 #include <vm/anon.h>
81 82 #include <vm/seg_map.h>
82 83
83 84 /*
84 85 * To balance the load among multiple swap areas, we don't allow
85 86 * more than swap_maxcontig allocations to be satisfied from a
86 87 * single swap area before moving on to the next swap area. This
87 88 * effectively "interleaves" allocations among the many swap areas.
88 89 */
89 90 int swap_maxcontig; /* set by anon_init() to 1 Mb */
90 91
91 92 #define MINIROOTSIZE 12000 /* ~6 Meg XXX */
92 93
93 94 /*
94 95 * XXX - this lock is a kludge. It serializes some aspects of swapadd() and
95 96 * swapdel() (namely VOP_OPEN, VOP_CLOSE, VN_RELE). It protects against
96 97 * somebody swapadd'ing and getting swap slots from a vnode, while someone
97 98 * else is in the process of closing or rele'ing it.
98 99 */
99 100 static kmutex_t swap_lock;
100 101
101 102 kmutex_t swapinfo_lock;
102 103
103 104 /*
104 105 * protected by the swapinfo_lock
105 106 */
106 107 struct swapinfo *swapinfo;
107 108
108 109 static struct swapinfo *silast;
109 110 static int nswapfiles;
110 111
111 112 static u_offset_t swap_getoff(struct swapinfo *);
112 113 static int swapadd(struct vnode *, ulong_t, ulong_t, char *);
113 114 static int swapdel(struct vnode *, ulong_t);
114 115 static int swapslot_free(struct vnode *, u_offset_t, struct swapinfo *);
115 116
116 117 /*
117 118 * swap device bitmap allocation macros
118 119 */
119 120 #define MAPSHIFT 5
120 121 #define NBBW (NBPW * NBBY) /* number of bits per word */
121 122 #define TESTBIT(map, i) (((map)[(i) >> MAPSHIFT] & (1 << (i) % NBBW)))
122 123 #define SETBIT(map, i) (((map)[(i) >> MAPSHIFT] |= (1 << (i) % NBBW)))
123 124 #define CLEARBIT(map, i) (((map)[(i) >> MAPSHIFT] &= ~(1 << (i) % NBBW)))
124 125
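These macros treat si_swapslots as an array of 32-bit words, one bit per slot: i >> MAPSHIFT selects the word and i % NBBW selects the bit within it. A standalone sketch of their behavior, assuming NBPW is 4 bytes so NBBW is 32 (matching the uint_t map words used here):

/* Standalone sketch of the slot-bitmap macros; assumes 32-bit map words. */
#include <stdio.h>

#define	NBBY		8		/* bits per byte */
#define	NBPW		4		/* bytes per map word (assumption) */
#define	MAPSHIFT	5
#define	NBBW		(NBPW * NBBY)	/* 32 bits per word */
#define	TESTBIT(map, i)	(((map)[(i) >> MAPSHIFT] & (1 << (i) % NBBW)))
#define	SETBIT(map, i)	(((map)[(i) >> MAPSHIFT] |= (1 << (i) % NBBW)))
#define	CLEARBIT(map, i) (((map)[(i) >> MAPSHIFT] &= ~(1 << (i) % NBBW)))

int
main(void)
{
	unsigned int map[2] = { 0, 0 };	/* 64 slots, all free */

	SETBIT(map, 37);		/* allocate slot 37: word 1, bit 5 */
	printf("slot 37 %s\n", TESTBIT(map, 37) ? "in use" : "free");
	CLEARBIT(map, 37);		/* free it again */
	printf("slot 37 %s\n", TESTBIT(map, 37) ? "in use" : "free");
	return (0);
}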
125 126 int swap_debug = 0; /* set for debug printf's */
126 127 int swap_verify = 0; /* set to verify slots when freeing and allocating */
127 128
128 129 uint_t swapalloc_maxcontig;
129 130
130 131 /*
131 132 * Allocate a range of up to *lenp contiguous slots (page) from a physical
132 133 * swap device. Flags are one of:
133 134 * SA_NOT Must have a slot from a physical swap device other than
134 135 * the one containing input (*vpp, *offp).
135 136 * Fewer slots than requested may be returned. *lenp allocated slots are
136 137 * returned starting at *offp on *vpp.
137 - * Returns 1 for a successful allocation, 0 for couldn't allocate any slots.
138 + * Returns 1 for a successful allocation, 0 if no slots could be
139 + * allocated, and -1 when there are no swap devices on this system.
138 140 */
139 141 int
140 142 swap_phys_alloc(
141 143 struct vnode **vpp,
142 144 u_offset_t *offp,
143 145 size_t *lenp,
144 146 uint_t flags)
145 147 {
146 148 struct swapinfo *sip;
147 149 offset_t soff, noff;
148 150 size_t len;
149 151
150 152 mutex_enter(&swapinfo_lock);
153 + if (swapinfo == NULL) {
154 + /* NO SWAP DEVICES on this system currently. */
155 + mutex_exit(&swapinfo_lock);
156 + return (-1);
157 + }
151 158 sip = silast;
152 159
153 160 /* Find a desirable physical device and allocate from it. */
154 161 do {
155 162 if (sip == NULL)
156 163 break;
157 164 if (!(sip->si_flags & ST_INDEL) &&
158 165 (spgcnt_t)sip->si_nfpgs > 0) {
159 166 /* Caller wants other than specified swap device */
160 167 if (flags & SA_NOT) {
161 168 if (*vpp != sip->si_vp ||
162 169 *offp < sip->si_soff ||
163 170 *offp >= sip->si_eoff)
164 171 goto found;
165 172 /* Caller is loose, will take anything */
166 173 } else
167 174 goto found;
168 175 } else if (sip->si_nfpgs == 0)
169 176 sip->si_allocs = 0;
170 177 if ((sip = sip->si_next) == NULL)
171 178 sip = swapinfo;
172 179 } while (sip != silast);
173 180 mutex_exit(&swapinfo_lock);
174 181 return (0);
175 182 found:
176 183 soff = swap_getoff(sip);
177 184 sip->si_nfpgs--;
178 185 if (soff == -1)
179 186 panic("swap_alloc: swap_getoff failed!");
180 187
181 188 for (len = PAGESIZE; len < *lenp; len += PAGESIZE) {
182 189 if (sip->si_nfpgs == 0)
183 190 break;
184 191 if (swapalloc_maxcontig && len >= swapalloc_maxcontig)
185 192 break;
186 193 noff = swap_getoff(sip);
187 194 if (noff == -1) {
188 195 break;
189 196 } else if (noff != soff + len) {
190 197 CLEARBIT(sip->si_swapslots, btop(noff - sip->si_soff));
191 198 break;
192 199 }
193 200 sip->si_nfpgs--;
194 201 }
195 202 *vpp = sip->si_vp;
196 203 *offp = soff;
197 204 *lenp = len;
198 205 ASSERT((spgcnt_t)sip->si_nfpgs >= 0);
199 206 sip->si_allocs += btop(len);
200 207 if (sip->si_allocs >= swap_maxcontig) {
201 208 sip->si_allocs = 0;
202 209 if ((silast = sip->si_next) == NULL)
203 210 silast = swapinfo;
204 211 }
205 212 TRACE_2(TR_FAC_VM, TR_SWAP_ALLOC,
206 213 "swap_alloc:sip %p offset %lx", sip, soff);
207 214 mutex_exit(&swapinfo_lock);
208 215 return (1);
209 216 }
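With the swapinfo == NULL check added above, swap_phys_alloc() has three distinct outcomes rather than a boolean one, so a caller that cares must compare against 1 explicitly; swap_newphysname() further down is the real consumer of the new -1 return. A hedged sketch of such a caller (the function name is hypothetical; SE_NOSWAP comes from <sys/fs/swapnode.h>, and SE_NODEV is assumed to be added there by this change):

/* Hypothetical caller distinguishing the three swap_phys_alloc() returns. */
static int
alloc_backing_sketch(struct vnode **pvpp, u_offset_t *poffp, size_t *plenp)
{
	int rc = swap_phys_alloc(pvpp, poffp, plenp, 0);

	if (rc == 1)
		return (0);		/* slots allocated at (*pvpp, *poffp) */
	if (rc == 0)
		return (SE_NOSWAP);	/* devices exist, but no free slots */
	return (SE_NODEV);		/* rc == -1: no swap devices at all */
}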
210 217
211 218 int swap_backsearch = 0;
212 219
213 220 /*
214 221 * Get a free offset on swap device sip.
215 222 * Return >=0 offset if succeeded, -1 for failure.
216 223 */
217 224 static u_offset_t
218 225 swap_getoff(struct swapinfo *sip)
219 226 {
220 227 uint_t *sp, *ep;
221 228 size_t aoff, boff, poff, slotnumber;
222 229
223 230 ASSERT(MUTEX_HELD(&swapinfo_lock));
224 231
225 232 sip->si_alloccnt++;
226 233 for (sp = &sip->si_swapslots[sip->si_hint >> MAPSHIFT],
227 234 ep = &sip->si_swapslots[sip->si_mapsize / NBPW]; sp < ep; sp++) {
228 235 if (*sp != (uint_t)0xffffffff)
229 236 goto foundentry;
230 237 else
231 238 sip->si_checkcnt++;
232 239 }
233 240 SWAP_PRINT(SW_ALLOC,
234 241 "swap_getoff: couldn't find slot from hint %ld to end\n",
235 242 sip->si_hint, 0, 0, 0, 0);
236 243 /*
237 244 * Go backwards? Check for faster method XXX
238 245 */
239 246 if (swap_backsearch) {
240 247 for (sp = &sip->si_swapslots[sip->si_hint >> MAPSHIFT],
241 248 ep = sip->si_swapslots; sp > ep; sp--) {
242 249 if (*sp != (uint_t)0xffffffff)
243 250 goto foundentry;
244 251 else
245 252 sip->si_checkcnt++;
246 253 }
247 254 } else {
248 255 for (sp = sip->si_swapslots,
249 256 ep = &sip->si_swapslots[sip->si_hint >> MAPSHIFT];
250 257 sp < ep; sp++) {
251 258 if (*sp != (uint_t)0xffffffff)
252 259 goto foundentry;
253 260 else
254 261 sip->si_checkcnt++;
255 262 }
256 263 }
257 264 if (*sp == 0xffffffff) {
258 265 cmn_err(CE_WARN, "No free swap slots!");
259 266 return ((u_offset_t)-1);
260 267 }
261 268
262 269 foundentry:
263 270 /*
264 271 * aoff is the page number offset (in bytes) of the si_swapslots
265 272 * array element containing a free page
266 273 *
267 274 * boff is the page number offset of the free page
268 275 * (i.e. cleared bit) in si_swapslots[aoff].
269 276 */
270 277 aoff = ((char *)sp - (char *)sip->si_swapslots) * NBBY;
271 278
272 279 for (boff = (sip->si_hint % NBBW); boff < NBBW; boff++) {
273 280 if (!TESTBIT(sip->si_swapslots, aoff + boff))
274 281 goto foundslot;
275 282 else
276 283 sip->si_checkcnt++;
277 284 }
278 285 for (boff = 0; boff < (sip->si_hint % NBBW); boff++) {
279 286 if (!TESTBIT(sip->si_swapslots, aoff + boff))
280 287 goto foundslot;
281 288 else
282 289 sip->si_checkcnt++;
283 290 }
284 291 panic("swap_getoff: didn't find slot in word hint %ld", sip->si_hint);
285 292
286 293 foundslot:
287 294 /*
288 295 * Return the offset of the free page in swap device.
289 296 * Convert page number to byte offset and add starting
290 297 * offset of swap device.
291 298 */
292 299 slotnumber = aoff + boff;
293 300 SWAP_PRINT(SW_ALLOC, "swap_getoff: allocating slot %ld\n",
294 301 slotnumber, 0, 0, 0, 0);
295 302 poff = ptob(slotnumber);
296 303 if (poff + sip->si_soff >= sip->si_eoff)
297 304 printf("ptob(aoff(%ld) + boff(%ld))(%ld) >= eoff(%ld)\n",
298 305 aoff, boff, ptob(slotnumber), (long)sip->si_eoff);
299 306 ASSERT(poff < sip->si_eoff);
300 307 /*
301 308 * We could verify here that the slot isn't already allocated
302 309 * by looking through all the anon slots.
303 310 */
304 311 SETBIT(sip->si_swapslots, slotnumber);
305 312 sip->si_hint = slotnumber + 1; /* hint = next slot */
306 313 return (poff + sip->si_soff);
307 314 }
308 315
309 316 /*
310 317 * Free a swap page.
311 318 */
312 319 void
313 320 swap_phys_free(struct vnode *vp, u_offset_t off, size_t len)
314 321 {
315 322 struct swapinfo *sip;
316 323 ssize_t pagenumber, npage;
317 324
318 325 mutex_enter(&swapinfo_lock);
319 326 sip = swapinfo;
320 327
321 328 do {
322 329 if (sip->si_vp == vp &&
323 330 sip->si_soff <= off && off < sip->si_eoff) {
324 331 for (pagenumber = btop(off - sip->si_soff),
325 332 npage = btop(len) + pagenumber;
326 333 pagenumber < npage; pagenumber++) {
327 334 SWAP_PRINT(SW_ALLOC,
328 335 "swap_phys_free: freeing slot %ld on "
329 336 "sip %p\n",
330 337 pagenumber, sip, 0, 0, 0);
331 338 if (!TESTBIT(sip->si_swapslots, pagenumber)) {
332 339 panic(
333 340 "swap_phys_free: freeing free slot "
334 341 "%p,%lx\n", (void *)vp,
335 342 ptob(pagenumber) + sip->si_soff);
336 343 }
337 344 CLEARBIT(sip->si_swapslots, pagenumber);
338 345 sip->si_nfpgs++;
339 346 }
340 347 ASSERT(sip->si_nfpgs <= sip->si_npgs);
341 348 mutex_exit(&swapinfo_lock);
342 349 return;
343 350 }
344 351 } while ((sip = sip->si_next) != NULL);
345 352 panic("swap_phys_free");
346 353 /*NOTREACHED*/
347 354 }
348 355
349 356 /*
350 357 * Return the anon struct corresponding to the given
351 358 * <vnode, off> if it is part of the virtual swap device.
352 359 * Return the anon struct if found, otherwise NULL.
353 360 */
354 361 struct anon *
355 362 swap_anon(struct vnode *vp, u_offset_t off)
356 363 {
357 364 struct anon *ap;
358 365
359 366 ASSERT(MUTEX_HELD(AH_MUTEX(vp, off)));
360 367
361 368 for (ap = anon_hash[ANON_HASH(vp, off)]; ap != NULL; ap = ap->an_hash) {
362 369 if (ap->an_vp == vp && ap->an_off == off)
363 370 return (ap);
364 371 }
365 372 return (NULL);
366 373 }
367 374
368 375
369 376 /*
370 377 * Determine if the vp offset range overlaps a swap device.
371 378 */
372 379 int
373 380 swap_in_range(struct vnode *vp, u_offset_t offset, size_t len)
374 381 {
375 382 struct swapinfo *sip;
376 383 u_offset_t eoff;
377 384
378 385 eoff = offset + len;
379 386 ASSERT(eoff > offset);
380 387
381 388 mutex_enter(&swapinfo_lock);
382 389 sip = swapinfo;
383 390 if (vp && sip) {
384 391 do {
385 392 if (vp != sip->si_vp || eoff <= sip->si_soff ||
386 393 offset >= sip->si_eoff)
387 394 continue;
388 395 mutex_exit(&swapinfo_lock);
389 396 return (1);
390 397 } while ((sip = sip->si_next) != NULL);
391 398 }
392 399 mutex_exit(&swapinfo_lock);
393 400 return (0);
394 401 }
395 402
396 403 /*
397 404 * See if name is one of our swap files
398 405 * even though lookupname failed.
399 406 * This can be used by swapdel to delete
400 407 * swap resources on remote machines
401 408 * where the link has gone down.
402 409 */
403 410 static struct vnode *
404 411 swapdel_byname(
405 412 char *name, /* pathname to delete */
406 413 ulong_t lowblk) /* Low block number of area to delete */
407 414 {
408 415 struct swapinfo **sipp, *osip;
409 416 u_offset_t soff;
410 417
411 418 /*
412 419 * Find the swap file entry for the file to
413 420 * be deleted. Skip any entries that are in
414 421 * transition.
415 422 */
416 423
417 424 soff = ptob(btopr(lowblk << SCTRSHFT)); /* must be page aligned */
418 425
419 426 mutex_enter(&swapinfo_lock);
420 427 for (sipp = &swapinfo; (osip = *sipp) != NULL; sipp = &osip->si_next) {
421 428 if ((strcmp(osip->si_pname, name) == 0) &&
422 429 (osip->si_soff == soff) && (osip->si_flags == 0)) {
423 430 struct vnode *vp = osip->si_vp;
424 431
425 432 VN_HOLD(vp);
426 433 mutex_exit(&swapinfo_lock);
427 434 return (vp);
428 435 }
429 436 }
430 437 mutex_exit(&swapinfo_lock);
431 438 return (NULL);
432 439 }
433 440
434 441
435 442 /*
436 443 * New system call to manipulate swap files.
437 444 */
438 445 int
439 446 swapctl(int sc_cmd, void *sc_arg, int *rv)
440 447 {
441 448 struct swapinfo *sip, *csip, *tsip;
442 449 int error = 0;
443 450 struct swapent st, *ust;
444 451 struct swapres sr;
445 452 struct vnode *vp;
446 453 int cnt = 0;
447 454 int tmp_nswapfiles;
448 455 int nswap;
449 456 int length, nlen;
450 457 int gplen = 0, plen;
451 458 char *swapname;
452 459 char *pname;
453 460 char *tpname;
454 461 struct anoninfo ai;
455 462 spgcnt_t avail;
456 463 int global = INGLOBALZONE(curproc);
457 464 struct zone *zp = curproc->p_zone;
458 465
459 466 /*
460 467 * When running in a zone we want to hide the details of the swap
461 468 * devices: we report there being only one swap device named "swap"
462 469 * having a size equal to the sum of the sizes of all real swap devices
463 470 * on the system.
464 471 */
465 472 switch (sc_cmd) {
466 473 case SC_GETNSWP:
467 474 if (global)
468 475 *rv = nswapfiles;
469 476 else
470 477 *rv = 1;
471 478 return (0);
472 479
473 480 case SC_AINFO:
474 481 /*
475 482 * Return anoninfo information with these changes:
476 483 * ani_max = maximum amount of swap space
477 484 * (including potentially available physical memory)
478 485 * ani_free = amount of unallocated anonymous memory
479 486 * (some of which might be reserved and including
480 487 * potentially available physical memory)
481 488 * ani_resv = amount of claimed (reserved) anonymous memory
482 489 */
483 490 avail = MAX((spgcnt_t)(availrmem - swapfs_minfree), 0);
484 491 ai.ani_max = (k_anoninfo.ani_max +
485 492 k_anoninfo.ani_mem_resv) + avail;
486 493
487 494 /* Update ani_free */
488 495 set_anoninfo();
489 496 ai.ani_free = k_anoninfo.ani_free + avail;
490 497
491 498 ai.ani_resv = k_anoninfo.ani_phys_resv +
492 499 k_anoninfo.ani_mem_resv;
493 500
494 501 if (!global && zp->zone_max_swap_ctl != UINT64_MAX) {
495 502 /*
496 503 * We're in a non-global zone with a swap cap. We
497 504 * always report the system-wide values for the global
498 505 * zone, even though it too can have a swap cap.
499 506 */
500 507
501 508 /*
502 509 * For a swap-capped zone, the numbers are contrived
503 510 * since we don't have a correct value of 'reserved'
504 511 * for the zone.
505 512 *
506 513 * The ani_max value is always the zone's swap cap.
507 514 *
508 515 * The ani_free value is always the difference between
509 516 * the cap and the amount of swap in use by the zone.
510 517 *
511 518 * The ani_resv value is typically set to be the amount
512 519 * of swap in use by the zone, but can be adjusted
513 520 * upwards to indicate how much swap is currently
514 521 * unavailable to that zone due to usage by entities
515 522 * outside the zone.
516 523 *
517 524 * This works as follows.
518 525 *
519 526 * In the 'swap -s' output, the data is displayed
520 527 * as follows:
521 528 * allocated = ani_max - ani_free
522 529 * reserved = ani_resv - allocated
523 530 * available = ani_max - ani_resv
524 531 *
525 532 * Taking a contrived example, if the swap cap is 100
526 533 * and the amount of swap used by the zone is 75, this
527 534 * gives:
528 535 * allocated = ani_max - ani_free = 100 - 25 = 75
529 536 * reserved = ani_resv - allocated = 75 - 75 = 0
530 537 * available = ani_max - ani_resv = 100 - 75 = 25
531 538 *
532 539 * In this typical case, you can see that the 'swap -s'
533 540 * 'reserved' will always be 0 inside a swap capped
534 541 * zone.
535 542 *
536 543 * However, if the system as a whole has less free
537 544 * swap than the zone limits allow, then we adjust
538 545 * the ani_resv value up so that it is the difference
539 546 * between the zone cap and the amount of free system
540 547 * swap. Taking the above example, but when the
541 548 * system as a whole only has 20 of swap available, we
542 549 * get an ani_resv of 100 - 20 = 80. This gives:
543 550 * allocated = ani_max - ani_free = 100 - 25 = 75
544 551 * reserved = ani_resv - allocated = 80 - 75 = 5
545 552 * available = ani_max - ani_resv = 100 - 80 = 20
546 553 *
547 554 * In this case, you can see how the ani_resv value is
548 555 * tweaked up to make the 'swap -s' numbers work inside
549 556 * the zone.
550 557 */
551 558 rctl_qty_t cap, used;
552 559 pgcnt_t pgcap, sys_avail;
553 560
554 561 mutex_enter(&zp->zone_mem_lock);
555 562 cap = zp->zone_max_swap_ctl;
556 563 used = zp->zone_max_swap;
557 564 mutex_exit(&zp->zone_mem_lock);
558 565
559 566 pgcap = MIN(btop(cap), ai.ani_max);
560 567 ai.ani_free = pgcap - btop(used);
561 568
562 569 /* Get the system-wide swap currently available. */
563 570 sys_avail = ai.ani_max - ai.ani_resv;
564 571 if (sys_avail < ai.ani_free)
565 572 ai.ani_resv = pgcap - sys_avail;
566 573 else
567 574 ai.ani_resv = btop(used);
568 575
569 576 ai.ani_max = pgcap;
570 577 }
571 578
572 579 if (copyout(&ai, sc_arg, sizeof (struct anoninfo)) != 0)
573 580 return (EFAULT);
574 581 return (0);
575 582
576 583 case SC_LIST:
577 584 if (copyin(sc_arg, &length, sizeof (int)) != 0)
578 585 return (EFAULT);
579 586 if (!global) {
580 587 struct swapent st;
581 588 char *swappath = "swap";
582 589
583 590 if (length < 1)
584 591 return (ENOMEM);
585 592 ust = (swapent_t *)((swaptbl_t *)sc_arg)->swt_ent;
586 593 if (copyin(ust, &st, sizeof (swapent_t)) != 0)
587 594 return (EFAULT);
588 595 st.ste_start = PAGESIZE >> SCTRSHFT;
589 596 st.ste_length = (off_t)0;
590 597 st.ste_pages = 0;
591 598 st.ste_free = 0;
592 599 st.ste_flags = 0;
593 600
594 601 mutex_enter(&swapinfo_lock);
595 602 for (sip = swapinfo, nswap = 0;
596 603 sip != NULL && nswap < nswapfiles;
597 604 sip = sip->si_next, nswap++) {
598 605 st.ste_length +=
599 606 (sip->si_eoff - sip->si_soff) >> SCTRSHFT;
600 607 st.ste_pages += sip->si_npgs;
601 608 st.ste_free += sip->si_nfpgs;
602 609 }
603 610 mutex_exit(&swapinfo_lock);
604 611
605 612 if (zp->zone_max_swap_ctl != UINT64_MAX) {
606 613 rctl_qty_t cap, used;
607 614
608 615 mutex_enter(&zp->zone_mem_lock);
609 616 cap = zp->zone_max_swap_ctl;
610 617 used = zp->zone_max_swap;
611 618 mutex_exit(&zp->zone_mem_lock);
612 619
613 620 st.ste_length = MIN(cap, st.ste_length);
614 621 st.ste_pages = MIN(btop(cap), st.ste_pages);
615 622 st.ste_free = MIN(st.ste_pages - btop(used),
616 623 st.ste_free);
617 624 }
618 625
619 626 if (copyout(&st, ust, sizeof (swapent_t)) != 0 ||
620 627 copyout(swappath, st.ste_path,
621 628 strlen(swappath) + 1) != 0) {
622 629 return (EFAULT);
623 630 }
624 631 *rv = 1;
625 632 return (0);
626 633 }
627 634 beginning:
628 635 tmp_nswapfiles = nswapfiles;
629 636 /* Return an error if not enough space for the whole table. */
630 637 if (length < tmp_nswapfiles)
631 638 return (ENOMEM);
632 639 /*
633 640 * Get memory to hold the swap entries and their names. We'll
634 641 * copy the real entries into these and then copy these out.
635 642 * Allocating the pathname memory is only a guess so we may
636 643 * find that we need more and have to do it again.
637 644 * All this is because we have to hold the anon lock while
638 645 * traversing the swapinfo list, and we can't be doing copyouts
639 646 * and/or kmem_alloc()s during this.
640 647 */
641 648 csip = kmem_zalloc(tmp_nswapfiles * sizeof (struct swapinfo),
642 649 KM_SLEEP);
643 650 retry:
644 651 nlen = tmp_nswapfiles * (gplen += 100);
645 652 pname = kmem_zalloc(nlen, KM_SLEEP);
646 653
647 654 mutex_enter(&swapinfo_lock);
648 655
649 656 if (tmp_nswapfiles != nswapfiles) {
650 657 mutex_exit(&swapinfo_lock);
651 658 kmem_free(pname, nlen);
652 659 kmem_free(csip,
653 660 tmp_nswapfiles * sizeof (struct swapinfo));
654 661 gplen = 0;
655 662 goto beginning;
656 663 }
657 664 for (sip = swapinfo, tsip = csip, tpname = pname, nswap = 0;
658 665 sip && nswap < tmp_nswapfiles;
659 666 sip = sip->si_next, tsip++, tpname += plen, nswap++) {
660 667 plen = sip->si_pnamelen;
661 668 if (tpname + plen - pname > nlen) {
662 669 mutex_exit(&swapinfo_lock);
663 670 kmem_free(pname, nlen);
664 671 goto retry;
665 672 }
666 673 *tsip = *sip;
667 674 tsip->si_pname = tpname;
668 675 (void) strcpy(tsip->si_pname, sip->si_pname);
669 676 }
670 677 mutex_exit(&swapinfo_lock);
671 678
672 679 if (sip) {
673 680 error = ENOMEM;
674 681 goto lout;
675 682 }
676 683 ust = (swapent_t *)((swaptbl_t *)sc_arg)->swt_ent;
677 684 for (tsip = csip, cnt = 0; cnt < nswap; tsip++, ust++, cnt++) {
678 685 if (copyin(ust, &st, sizeof (swapent_t)) != 0) {
679 686 error = EFAULT;
680 687 goto lout;
681 688 }
682 689 st.ste_flags = tsip->si_flags;
683 690 st.ste_length =
684 691 (tsip->si_eoff - tsip->si_soff) >> SCTRSHFT;
685 692 st.ste_start = tsip->si_soff >> SCTRSHFT;
686 693 st.ste_pages = tsip->si_npgs;
687 694 st.ste_free = tsip->si_nfpgs;
688 695 if (copyout(&st, ust, sizeof (swapent_t)) != 0) {
689 696 error = EFAULT;
690 697 goto lout;
691 698 }
692 699 if (!tsip->si_pnamelen)
693 700 continue;
694 701 if (copyout(tsip->si_pname, st.ste_path,
695 702 tsip->si_pnamelen) != 0) {
696 703 error = EFAULT;
697 704 goto lout;
698 705 }
699 706 }
700 707 *rv = nswap;
701 708 lout:
702 709 kmem_free(csip, tmp_nswapfiles * sizeof (struct swapinfo));
703 710 kmem_free(pname, nlen);
704 711 return (error);
705 712
706 713 case SC_ADD:
707 714 case SC_REMOVE:
708 715 break;
709 716 default:
710 717 return (EINVAL);
711 718 }
712 719 if ((error = secpolicy_swapctl(CRED())) != 0)
713 720 return (error);
714 721
715 722 if (copyin(sc_arg, &sr, sizeof (swapres_t)))
716 723 return (EFAULT);
717 724
718 725 /* Allocate the space to read in pathname */
719 726 if ((swapname = kmem_alloc(MAXPATHLEN, KM_NOSLEEP)) == NULL)
720 727 return (ENOMEM);
721 728
722 729 error = copyinstr(sr.sr_name, swapname, MAXPATHLEN, 0);
723 730 if (error)
724 731 goto out;
725 732
726 733 error = lookupname(swapname, UIO_SYSSPACE, FOLLOW, NULLVPP, &vp);
727 734 if (error) {
728 735 if (sc_cmd == SC_ADD)
729 736 goto out;
730 737 /* see if we match by name */
731 738 vp = swapdel_byname(swapname, (size_t)sr.sr_start);
732 739 if (vp == NULL)
733 740 goto out;
734 741 }
735 742
736 743 if (vp->v_flag & (VNOMAP | VNOSWAP)) {
737 744 VN_RELE(vp);
738 745 error = ENOSYS;
739 746 goto out;
740 747 }
741 748 switch (vp->v_type) {
742 749 case VBLK:
743 750 break;
744 751
745 752 case VREG:
746 753 if (vp->v_vfsp && vn_is_readonly(vp))
747 754 error = EROFS;
748 755 else
749 756 error = VOP_ACCESS(vp, VREAD|VWRITE, 0, CRED(), NULL);
750 757 break;
751 758
752 759 case VDIR:
753 760 error = EISDIR;
754 761 break;
755 762 default:
756 763 error = ENOSYS;
757 764 break;
758 765 }
759 766 if (error == 0) {
760 767 if (sc_cmd == SC_REMOVE)
761 768 error = swapdel(vp, sr.sr_start);
762 769 else
763 770 error = swapadd(vp, sr.sr_start,
764 771 sr.sr_length, swapname);
765 772 }
766 773 VN_RELE(vp);
767 774 out:
768 775 kmem_free(swapname, MAXPATHLEN);
769 776 return (error);
770 777 }
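The contrived 'swap -s' arithmetic described in the SC_AINFO comment above is easy to check in isolation. A minimal userland sketch using the example numbers from that comment (cap 100, 75 pages in use, 20 pages free system-wide); everything here is illustrative:

/* Userland sketch of the swap-capped zone arithmetic from SC_AINFO. */
#include <stdio.h>

int
main(void)
{
	long ani_max = 100;	/* zone swap cap, in pages */
	long used = 75;		/* swap in use by the zone */
	long sys_avail = 20;	/* system-wide swap still available */
	long ani_free = ani_max - used;		/* 25 */
	long ani_resv;

	/* Bump resv when the system has less free swap than the cap allows. */
	if (sys_avail < ani_free)
		ani_resv = ani_max - sys_avail;	/* 100 - 20 = 80 */
	else
		ani_resv = used;		/* typical case */

	printf("allocated = %ld\n", ani_max - ani_free);	/* 75 */
	printf("reserved  = %ld\n", ani_resv - (ani_max - ani_free)); /* 5 */
	printf("available = %ld\n", ani_max - ani_resv);	/* 20 */
	return (0);
}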
771 778
772 779 #if defined(_LP64) && defined(_SYSCALL32)
773 780
774 781 int
775 782 swapctl32(int sc_cmd, void *sc_arg, int *rv)
776 783 {
777 784 struct swapinfo *sip, *csip, *tsip;
778 785 int error = 0;
779 786 struct swapent32 st, *ust;
780 787 struct swapres32 sr;
781 788 struct vnode *vp;
782 789 int cnt = 0;
783 790 int tmp_nswapfiles;
784 791 int nswap;
785 792 int length, nlen;
786 793 int gplen = 0, plen;
787 794 char *swapname;
788 795 char *pname;
789 796 char *tpname;
790 797 struct anoninfo32 ai;
791 798 size_t s;
792 799 spgcnt_t avail;
793 800 int global = INGLOBALZONE(curproc);
794 801 struct zone *zp = curproc->p_zone;
795 802
796 803 /*
797 804 * When running in a zone we want to hide the details of the swap
798 805 * devices: we report there being only one swap device named "swap"
799 806 * having a size equal to the sum of the sizes of all real swap devices
800 807 * on the system.
801 808 */
802 809 switch (sc_cmd) {
803 810 case SC_GETNSWP:
804 811 if (global)
805 812 *rv = nswapfiles;
806 813 else
807 814 *rv = 1;
808 815 return (0);
809 816
810 817 case SC_AINFO:
811 818 /*
812 819 * Return anoninfo information with these changes:
813 820 * ani_max = maximum amount of swap space
814 821 * (including potentially available physical memory)
815 822 * ani_free = amount of unallocated anonymous memory
816 823 * (some of which might be reserved and including
817 824 * potentially available physical memory)
818 825 * ani_resv = amount of claimed (reserved) anonymous memory
819 826 */
820 827 avail = MAX((spgcnt_t)(availrmem - swapfs_minfree), 0);
821 828 s = (k_anoninfo.ani_max + k_anoninfo.ani_mem_resv) + avail;
822 829 if (s > UINT32_MAX)
823 830 return (EOVERFLOW);
824 831 ai.ani_max = s;
825 832
826 833 /* Update ani_free */
827 834 set_anoninfo();
828 835 s = k_anoninfo.ani_free + avail;
829 836 if (s > UINT32_MAX)
830 837 return (EOVERFLOW);
831 838 ai.ani_free = s;
832 839
833 840 s = k_anoninfo.ani_phys_resv + k_anoninfo.ani_mem_resv;
834 841 if (s > UINT32_MAX)
835 842 return (EOVERFLOW);
836 843 ai.ani_resv = s;
837 844
838 845 if (!global && zp->zone_max_swap_ctl != UINT64_MAX) {
839 846 /*
840 847 * We're in a non-global zone with a swap cap. We
841 848 * always report the system-wide values for the global
842 849 * zone, even though it too can have a swap cap.
843 850 * See the comment for the SC_AINFO case in swapctl()
844 851 * which explains the following logic.
845 852 */
846 853 rctl_qty_t cap, used;
847 854 pgcnt_t pgcap, sys_avail;
848 855
849 856 mutex_enter(&zp->zone_mem_lock);
850 857 cap = zp->zone_max_swap_ctl;
851 858 used = zp->zone_max_swap;
852 859 mutex_exit(&zp->zone_mem_lock);
853 860
854 861 pgcap = MIN(btop(cap), ai.ani_max);
855 862 ai.ani_free = pgcap - btop(used);
856 863
857 864 /* Get the system-wide swap currently available. */
858 865 sys_avail = ai.ani_max - ai.ani_resv;
859 866 if (sys_avail < ai.ani_free)
860 867 ai.ani_resv = pgcap - sys_avail;
861 868 else
862 869 ai.ani_resv = btop(used);
863 870
864 871 ai.ani_max = pgcap;
865 872 }
866 873
867 874 if (copyout(&ai, sc_arg, sizeof (ai)) != 0)
868 875 return (EFAULT);
869 876 return (0);
870 877
871 878 case SC_LIST:
872 879 if (copyin(sc_arg, &length, sizeof (int32_t)) != 0)
873 880 return (EFAULT);
874 881 if (!global) {
875 882 struct swapent32 st;
876 883 char *swappath = "swap";
877 884
878 885 if (length < 1)
879 886 return (ENOMEM);
880 887 ust = (swapent32_t *)((swaptbl32_t *)sc_arg)->swt_ent;
881 888 if (copyin(ust, &st, sizeof (swapent32_t)) != 0)
882 889 return (EFAULT);
883 890 st.ste_start = PAGESIZE >> SCTRSHFT;
884 891 st.ste_length = (off_t)0;
885 892 st.ste_pages = 0;
886 893 st.ste_free = 0;
887 894 st.ste_flags = 0;
888 895
889 896 mutex_enter(&swapinfo_lock);
890 897 for (sip = swapinfo, nswap = 0;
891 898 sip != NULL && nswap < nswapfiles;
892 899 sip = sip->si_next, nswap++) {
893 900 st.ste_length +=
894 901 (sip->si_eoff - sip->si_soff) >> SCTRSHFT;
895 902 st.ste_pages += sip->si_npgs;
896 903 st.ste_free += sip->si_nfpgs;
897 904 }
898 905 mutex_exit(&swapinfo_lock);
899 906
900 907 if (zp->zone_max_swap_ctl != UINT64_MAX) {
901 908 rctl_qty_t cap, used;
902 909
903 910 mutex_enter(&zp->zone_mem_lock);
904 911 cap = zp->zone_max_swap_ctl;
905 912 used = zp->zone_max_swap;
906 913 mutex_exit(&zp->zone_mem_lock);
907 914
908 915 st.ste_length = MIN(cap, st.ste_length);
909 916 st.ste_pages = MIN(btop(cap), st.ste_pages);
910 917 st.ste_free = MIN(st.ste_pages - btop(used),
911 918 st.ste_free);
912 919 }
913 920
914 921 if (copyout(&st, ust, sizeof (swapent32_t)) != 0 ||
915 922 copyout(swappath, (caddr_t)(uintptr_t)st.ste_path,
916 923 strlen(swappath) + 1) != 0) {
917 924 return (EFAULT);
918 925 }
919 926 *rv = 1;
920 927 return (0);
921 928 }
922 929 beginning:
923 930 tmp_nswapfiles = nswapfiles;
924 931 /* Return an error if not enough space for the whole table. */
925 932 if (length < tmp_nswapfiles)
926 933 return (ENOMEM);
927 934 /*
928 935 * Get memory to hold the swap entries and their names. We'll
929 936 * copy the real entries into these and then copy these out.
930 937 * Allocating the pathname memory is only a guess so we may
931 938 * find that we need more and have to do it again.
932 939 * All this is because we have to hold the anon lock while
933 940 * traversing the swapinfo list, and we can't be doing copyouts
934 941 * and/or kmem_alloc()s during this.
935 942 */
936 943 csip = kmem_zalloc(tmp_nswapfiles * sizeof (*csip), KM_SLEEP);
937 944 retry:
938 945 nlen = tmp_nswapfiles * (gplen += 100);
939 946 pname = kmem_zalloc(nlen, KM_SLEEP);
940 947
941 948 mutex_enter(&swapinfo_lock);
942 949
943 950 if (tmp_nswapfiles != nswapfiles) {
944 951 mutex_exit(&swapinfo_lock);
945 952 kmem_free(pname, nlen);
946 953 kmem_free(csip, tmp_nswapfiles * sizeof (*csip));
947 954 gplen = 0;
948 955 goto beginning;
949 956 }
950 957 for (sip = swapinfo, tsip = csip, tpname = pname, nswap = 0;
951 958 (sip != NULL) && (nswap < tmp_nswapfiles);
952 959 sip = sip->si_next, tsip++, tpname += plen, nswap++) {
953 960 plen = sip->si_pnamelen;
954 961 if (tpname + plen - pname > nlen) {
955 962 mutex_exit(&swapinfo_lock);
956 963 kmem_free(pname, nlen);
957 964 goto retry;
958 965 }
959 966 *tsip = *sip;
960 967 tsip->si_pname = tpname;
961 968 (void) strcpy(tsip->si_pname, sip->si_pname);
962 969 }
963 970 mutex_exit(&swapinfo_lock);
964 971
965 972 if (sip != NULL) {
966 973 error = ENOMEM;
967 974 goto lout;
968 975 }
969 976 ust = (swapent32_t *)((swaptbl32_t *)sc_arg)->swt_ent;
970 977 for (tsip = csip, cnt = 0; cnt < nswap; tsip++, ust++, cnt++) {
971 978 if (copyin(ust, &st, sizeof (*ust)) != 0) {
972 979 error = EFAULT;
973 980 goto lout;
974 981 }
975 982 st.ste_flags = tsip->si_flags;
976 983 st.ste_length =
977 984 (tsip->si_eoff - tsip->si_soff) >> SCTRSHFT;
978 985 st.ste_start = tsip->si_soff >> SCTRSHFT;
979 986 st.ste_pages = tsip->si_npgs;
980 987 st.ste_free = tsip->si_nfpgs;
981 988 if (copyout(&st, ust, sizeof (st)) != 0) {
982 989 error = EFAULT;
983 990 goto lout;
984 991 }
985 992 if (!tsip->si_pnamelen)
986 993 continue;
987 994 if (copyout(tsip->si_pname,
988 995 (caddr_t)(uintptr_t)st.ste_path,
989 996 tsip->si_pnamelen) != 0) {
990 997 error = EFAULT;
991 998 goto lout;
992 999 }
993 1000 }
994 1001 *rv = nswap;
995 1002 lout:
996 1003 kmem_free(csip, tmp_nswapfiles * sizeof (*csip));
997 1004 kmem_free(pname, nlen);
998 1005 return (error);
999 1006
1000 1007 case SC_ADD:
1001 1008 case SC_REMOVE:
1002 1009 break;
1003 1010 default:
1004 1011 return (EINVAL);
1005 1012 }
1006 1013 if ((error = secpolicy_swapctl(CRED())) != 0)
1007 1014 return (error);
1008 1015
1009 1016 if (copyin(sc_arg, &sr, sizeof (sr)))
1010 1017 return (EFAULT);
1011 1018
1012 1019 /* Allocate the space to read in pathname */
1013 1020 if ((swapname = kmem_alloc(MAXPATHLEN, KM_NOSLEEP)) == NULL)
1014 1021 return (ENOMEM);
1015 1022
1016 1023 error = copyinstr((caddr_t)(uintptr_t)sr.sr_name,
1017 1024 swapname, MAXPATHLEN, NULL);
1018 1025 if (error)
1019 1026 goto out;
1020 1027
1021 1028 error = lookupname(swapname, UIO_SYSSPACE, FOLLOW, NULLVPP, &vp);
1022 1029 if (error) {
1023 1030 if (sc_cmd == SC_ADD)
1024 1031 goto out;
1025 1032 /* see if we match by name */
1026 1033 vp = swapdel_byname(swapname, (uint_t)sr.sr_start);
1027 1034 if (vp == NULL)
1028 1035 goto out;
1029 1036 }
1030 1037
1031 1038 if (vp->v_flag & (VNOMAP | VNOSWAP)) {
1032 1039 VN_RELE(vp);
1033 1040 error = ENOSYS;
1034 1041 goto out;
1035 1042 }
1036 1043 switch (vp->v_type) {
1037 1044 case VBLK:
1038 1045 break;
1039 1046
1040 1047 case VREG:
1041 1048 if (vp->v_vfsp && vn_is_readonly(vp))
1042 1049 error = EROFS;
1043 1050 else
1044 1051 error = VOP_ACCESS(vp, VREAD|VWRITE, 0, CRED(), NULL);
1045 1052 break;
1046 1053
1047 1054 case VDIR:
1048 1055 error = EISDIR;
1049 1056 break;
1050 1057 default:
1051 1058 error = ENOSYS;
1052 1059 break;
1053 1060 }
1054 1061 if (error == 0) {
1055 1062 if (sc_cmd == SC_REMOVE)
1056 1063 error = swapdel(vp, sr.sr_start);
1057 1064 else
1058 1065 error = swapadd(vp, sr.sr_start, sr.sr_length,
1059 1066 swapname);
1060 1067 }
1061 1068 VN_RELE(vp);
1062 1069 out:
1063 1070 kmem_free(swapname, MAXPATHLEN);
1064 1071 return (error);
1065 1072 }
1066 1073
1067 1074 #endif /* _LP64 && _SYSCALL32 */
1068 1075
1069 1076 /*
1070 1077 * Add a new swap file.
1071 1078 */
1072 1079 int
1073 1080 swapadd(struct vnode *vp, ulong_t lowblk, ulong_t nblks, char *swapname)
1074 1081 {
1075 1082 struct swapinfo **sipp, *nsip = NULL, *esip = NULL;
1076 1083 struct vnode *cvp;
1077 1084 struct vattr vattr;
1078 1085 pgcnt_t pages;
1079 1086 u_offset_t soff, eoff;
1080 1087 int error;
1081 1088 ssize_t i, start, end;
1082 1089 ushort_t wasswap;
1083 1090 ulong_t startblk;
1084 1091 size_t returned_mem;
1085 1092
1086 1093 SWAP_PRINT(SW_CTL, "swapadd: vp %p lowblk %ld nblks %ld swapname %s\n",
1087 1094 vp, lowblk, nblks, swapname, 0);
1088 1095 /*
1089 1096 * Get the real vnode. (If vp is not a specnode it just returns vp, so
1090 1097 * it does the right thing, but having this code know about specnodes
1091 1098 * violates the spirit of having it be independent of vnode type.)
1092 1099 */
1093 1100 cvp = common_specvp(vp);
1094 1101
1095 1102 /*
1096 1103 * OR in VISSWAP so the filesystem can deny swap-ons during open.
1097 1104 */
1098 1105 mutex_enter(&cvp->v_lock);
1099 1106 wasswap = cvp->v_flag & VISSWAP;
1100 1107 cvp->v_flag |= VISSWAP;
1101 1108 mutex_exit(&cvp->v_lock);
1102 1109
1103 1110 mutex_enter(&swap_lock);
1104 1111 if (error = VOP_OPEN(&cvp, FREAD|FWRITE, CRED(), NULL)) {
1105 1112 mutex_exit(&swap_lock);
1106 1113 /* restore state of v_flag */
1107 1114 if (!wasswap) {
1108 1115 mutex_enter(&cvp->v_lock);
1109 1116 cvp->v_flag &= ~VISSWAP;
1110 1117 mutex_exit(&cvp->v_lock);
1111 1118 }
1112 1119 return (error);
1113 1120 }
1114 1121 mutex_exit(&swap_lock);
1115 1122
1116 1123 /*
1117 1124 * Get partition size. Return error if empty partition,
1118 1125 * or if request does not fit within the partition.
1119 1126 * If this is the first swap device, we can reduce
1120 1127 * the size of the swap area to match what is
1121 1128 * available. This can happen if the system was built
1122 1129 * on a machine with a different size swap partition.
1123 1130 */
1124 1131 vattr.va_mask = AT_SIZE;
1125 1132 if (error = VOP_GETATTR(cvp, &vattr, ATTR_COMM, CRED(), NULL))
1126 1133 goto out;
1127 1134
1128 1135 /*
1129 1136 * Specfs returns a va_size of MAXOFFSET_T (UNKNOWN_SIZE) when the
1130 1137 * size of the device can't be determined.
1131 1138 */
1132 1139 if ((vattr.va_size == 0) || (vattr.va_size == MAXOFFSET_T)) {
1133 1140 error = EINVAL;
1134 1141 goto out;
1135 1142 }
1136 1143
1137 1144 #ifdef _ILP32
1138 1145 /*
1139 1146 * No support for large swap in 32-bit OS, if the size of the swap is
1140 1147 * bigger than MAXOFF32_T then the size used by swapfs must be limited.
1141 1148 * This limitation is imposed by the swap subsystem itself, a D_64BIT
1142 1149 * driver as the target of swap operation should be able to field
1143 1150 * the IO.
1144 1151 */
1145 1152 if (vattr.va_size > MAXOFF32_T) {
1146 1153 cmn_err(CE_NOTE,
1147 1154 "!swap device %s truncated from 0x%llx to 0x%x bytes",
1148 1155 swapname, vattr.va_size, MAXOFF32_T);
1149 1156 vattr.va_size = MAXOFF32_T;
1150 1157 }
1151 1158 #endif /* _ILP32 */
1152 1159
1153 1160 /* Fail if file not writeable (try to set size to current size) */
1154 1161 vattr.va_mask = AT_SIZE;
1155 1162 if (error = VOP_SETATTR(cvp, &vattr, 0, CRED(), NULL))
1156 1163 goto out;
1157 1164
1158 1165 /* Fail if fs does not support VOP_PAGEIO */
1159 1166 error = VOP_PAGEIO(cvp, (page_t *)NULL, (u_offset_t)0, 0, 0, CRED(),
1160 1167 NULL);
1161 1168
1162 1169 if (error == ENOSYS)
1163 1170 goto out;
1164 1171 else
1165 1172 error = 0;
1166 1173 /*
1167 1174 * If swapping on the root filesystem don't put swap blocks that
1168 1175 * correspond to the miniroot filesystem on the swap free list.
1169 1176 */
1170 1177 if (cvp == rootdir)
1171 1178 startblk = roundup(MINIROOTSIZE<<SCTRSHFT, klustsize)>>SCTRSHFT;
1172 1179 else /* Skip 1st page (disk label) */
1173 1180 startblk = (ulong_t)(lowblk ? lowblk : 1);
1174 1181
1175 1182 soff = startblk << SCTRSHFT;
1176 1183 if (soff >= vattr.va_size) {
1177 1184 error = EINVAL;
1178 1185 goto out;
1179 1186 }
1180 1187
1181 1188 /*
1182 1189 * If user specified 0 blks, use the size of the device
1183 1190 */
1184 1191 eoff = nblks ? soff + (nblks - (startblk - lowblk) << SCTRSHFT) :
1185 1192 vattr.va_size;
1186 1193
1187 1194 SWAP_PRINT(SW_CTL, "swapadd: va_size %ld soff %ld eoff %ld\n",
1188 1195 vattr.va_size, soff, eoff, 0, 0);
1189 1196
1190 1197 if (eoff > vattr.va_size) {
1191 1198 error = EINVAL;
1192 1199 goto out;
1193 1200 }
1194 1201
1195 1202 /*
1196 1203 * The starting and ending offsets must be page aligned.
1197 1204 * Round soff up to next page boundary, round eoff
1198 1205 * down to previous page boundary.
1199 1206 */
1200 1207 soff = ptob(btopr(soff));
1201 1208 eoff = ptob(btop(eoff));
1202 1209 if (soff >= eoff) {
1203 1210 SWAP_PRINT(SW_CTL, "swapadd: soff %ld >= eoff %ld\n",
1204 1211 soff, eoff, 0, 0, 0);
1205 1212 error = EINVAL;
1206 1213 goto out;
1207 1214 }
1208 1215
1209 1216 pages = btop(eoff - soff);
1210 1217
1211 1218 /* Allocate and partially set up the new swapinfo */
1212 1219 nsip = kmem_zalloc(sizeof (struct swapinfo), KM_SLEEP);
1213 1220 nsip->si_vp = cvp;
1214 1221
1215 1222 nsip->si_soff = soff;
1216 1223 nsip->si_eoff = eoff;
1217 1224 nsip->si_hint = 0;
1218 1225 nsip->si_checkcnt = nsip->si_alloccnt = 0;
1219 1226
1220 1227 nsip->si_pnamelen = (int)strlen(swapname) + 1;
1221 1228 nsip->si_pname = (char *)kmem_zalloc(nsip->si_pnamelen, KM_SLEEP);
1222 1229 bcopy(swapname, nsip->si_pname, nsip->si_pnamelen - 1);
1223 1230 SWAP_PRINT(SW_CTL, "swapadd: allocating swapinfo for %s, %ld pages\n",
1224 1231 swapname, pages, 0, 0, 0);
1225 1232 /*
1226 1233 * Size of swapslots map in bytes
1227 1234 */
1228 1235 nsip->si_mapsize = P2ROUNDUP(pages, NBBW) / NBBY;
1229 1236 nsip->si_swapslots = kmem_zalloc(nsip->si_mapsize, KM_SLEEP);
1230 1237
1231 1238 /*
1232 1239 * Permanently set the bits that can't ever be allocated,
1233 1240 * i.e. those from the ending offset to the round up slot for the
1234 1241 * swapslots bit map.
1235 1242 */
1236 1243 start = pages;
1237 1244 end = P2ROUNDUP(pages, NBBW);
1238 1245 for (i = start; i < end; i++) {
1239 1246 SWAP_PRINT(SW_CTL, "swapadd: set bit for page %ld\n", i,
1240 1247 0, 0, 0, 0);
1241 1248 SETBIT(nsip->si_swapslots, i);
1242 1249 }
1243 1250 nsip->si_npgs = nsip->si_nfpgs = pages;
1244 1251 /*
1245 1252 * Now check to see if we can add it. We wait until now to check because
1246 1253 * we need the swapinfo_lock and we don't want to sleep with it (e.g.,
1247 1254 * during kmem_alloc()) while we're setting up the swapinfo.
1248 1255 */
1249 1256 mutex_enter(&swapinfo_lock);
1250 1257 for (sipp = &swapinfo; (esip = *sipp) != NULL; sipp = &esip->si_next) {
1251 1258 if (esip->si_vp == cvp) {
1252 1259 if (esip->si_soff == soff && esip->si_npgs == pages &&
1253 1260 (esip->si_flags & ST_DOINGDEL)) {
1254 1261 /*
1255 1262 * We are adding a device that we are in the
1256 1263 * middle of deleting. Just clear the
1257 1264 * ST_DOINGDEL flag to signal this and
1258 1265 * the deletion routine will eventually notice
1259 1266 * it and add it back.
1260 1267 */
1261 1268 esip->si_flags &= ~ST_DOINGDEL;
1262 1269 mutex_exit(&swapinfo_lock);
1263 1270 goto out;
1264 1271 }
1265 1272 /* disallow overlapping swap files */
1266 1273 if ((soff < esip->si_eoff) && (eoff > esip->si_soff)) {
1267 1274 error = EEXIST;
1268 1275 mutex_exit(&swapinfo_lock);
1269 1276 goto out;
1270 1277 }
1271 1278 }
1272 1279 }
1273 1280
1274 1281 nswapfiles++;
1275 1282
1276 1283 /*
1277 1284 * add new swap device to list and shift allocations to it
1278 1285 * before updating the anoninfo counters
1279 1286 */
1280 1287 *sipp = nsip;
1281 1288 silast = nsip;
1282 1289
1283 1290 /*
1284 1291 * Update the total amount of reservable swap space
1285 1292 * accounting properly for swap space from physical memory
1286 1293 */
1287 1294 /* New swap device soaks up currently reserved memory swap */
1288 1295 mutex_enter(&anoninfo_lock);
1289 1296
1290 1297 ASSERT(k_anoninfo.ani_mem_resv >= k_anoninfo.ani_locked_swap);
1291 1298 ASSERT(k_anoninfo.ani_max >= k_anoninfo.ani_phys_resv);
1292 1299
1293 1300 k_anoninfo.ani_max += pages;
1294 1301 ANI_ADD(pages);
1295 1302 if (k_anoninfo.ani_mem_resv > k_anoninfo.ani_locked_swap) {
1296 1303 returned_mem = MIN(k_anoninfo.ani_mem_resv -
1297 1304 k_anoninfo.ani_locked_swap,
1298 1305 k_anoninfo.ani_max - k_anoninfo.ani_phys_resv);
1299 1306
1300 1307 ANI_ADD(-returned_mem);
1301 1308 k_anoninfo.ani_free -= returned_mem;
1302 1309 k_anoninfo.ani_mem_resv -= returned_mem;
1303 1310 k_anoninfo.ani_phys_resv += returned_mem;
1304 1311
1305 1312 mutex_enter(&freemem_lock);
1306 1313 availrmem += returned_mem;
1307 1314 mutex_exit(&freemem_lock);
1308 1315 }
1309 1316 /*
1310 1317 * At boot time, to permit booting small memory machines using
1311 1318 * only physical memory as swap space, we allowed a dangerously
1312 1319 * large amount of memory to be used as swap space; now that
1313 1320 * more physical backing store is available bump down the amount
1314 1321 * we can get from memory to a safer size.
1315 1322 */
1316 1323 if (swapfs_minfree < swapfs_desfree) {
1317 1324 mutex_enter(&freemem_lock);
1318 1325 if (availrmem > swapfs_desfree || !k_anoninfo.ani_mem_resv)
1319 1326 swapfs_minfree = swapfs_desfree;
1320 1327 mutex_exit(&freemem_lock);
1321 1328 }
1322 1329
1323 1330 SWAP_PRINT(SW_CTL, "swapadd: ani_max %ld ani_free %ld\n",
1324 1331 k_anoninfo.ani_max, k_anoninfo.ani_free, 0, 0, 0);
1325 1332
1326 1333 mutex_exit(&anoninfo_lock);
1327 1334
1328 1335 mutex_exit(&swapinfo_lock);
1329 1336
1330 1337 /* Initialize the dump device */
1331 1338 mutex_enter(&dump_lock);
1332 1339 if (dumpvp == NULL)
1333 1340 (void) dumpinit(vp, swapname, 0);
1334 1341 mutex_exit(&dump_lock);
1335 1342
1336 1343 VN_HOLD(cvp);
1337 1344 out:
1338 1345 if (error || esip) {
1339 1346 SWAP_PRINT(SW_CTL, "swapadd: error (%d)\n", error, 0, 0, 0, 0);
1340 1347
1341 1348 if (!wasswap) {
1342 1349 mutex_enter(&cvp->v_lock);
1343 1350 cvp->v_flag &= ~VISSWAP;
1344 1351 mutex_exit(&cvp->v_lock);
1345 1352 }
1346 1353 if (nsip) {
1347 1354 kmem_free(nsip->si_swapslots, (size_t)nsip->si_mapsize);
1348 1355 kmem_free(nsip->si_pname, nsip->si_pnamelen);
1349 1356 kmem_free(nsip, sizeof (*nsip));
1350 1357 }
1351 1358 mutex_enter(&swap_lock);
1352 1359 (void) VOP_CLOSE(cvp, FREAD|FWRITE, 1, (offset_t)0, CRED(),
1353 1360 NULL);
1354 1361 mutex_exit(&swap_lock);
1355 1362 }
1356 1363 return (error);
1357 1364 }
1358 1365
1359 1366 /*
1360 1367 * Delete a swap file.
1361 1368 */
1362 1369 static int
1363 1370 swapdel(
1364 1371 struct vnode *vp,
1365 1372 ulong_t lowblk) /* Low block number of area to delete. */
1366 1373 {
1367 1374 struct swapinfo **sipp, *osip = NULL;
1368 1375 struct vnode *cvp;
1369 1376 u_offset_t soff;
1370 1377 int error = 0;
1371 1378 u_offset_t toff = 0;
1372 1379 struct vnode *tvp = NULL;
1373 1380 spgcnt_t pages;
1374 1381 struct anon **app, *ap;
1375 1382 kmutex_t *ahm;
1376 1383 pgcnt_t adjust_swap = 0;
1377 1384
1378 1385 /* Find the swap file entry for the file to be deleted */
1379 1386 cvp = common_specvp(vp);
1380 1387
1381 1388
1382 1389 lowblk = lowblk ? lowblk : 1; /* Skip first page (disk label) */
1383 1390 soff = ptob(btopr(lowblk << SCTRSHFT)); /* must be page aligned */
1384 1391
1385 1392 mutex_enter(&swapinfo_lock);
1386 1393 for (sipp = &swapinfo; (osip = *sipp) != NULL; sipp = &osip->si_next) {
1387 1394 if ((osip->si_vp == cvp) &&
1388 1395 (osip->si_soff == soff) && (osip->si_flags == 0))
1389 1396 break;
1390 1397 }
1391 1398
1392 1399 /* If the file was not found, error. */
1393 1400 if (osip == NULL) {
1394 1401 error = EINVAL;
1395 1402 mutex_exit(&swapinfo_lock);
1396 1403 goto out;
1397 1404 }
1398 1405
1399 1406 pages = osip->si_npgs;
1400 1407
1401 1408 /*
1402 1409 * Do not delete if we will be low on swap pages.
1403 1410 */
1404 1411 mutex_enter(&anoninfo_lock);
1405 1412
1406 1413 ASSERT(k_anoninfo.ani_mem_resv >= k_anoninfo.ani_locked_swap);
1407 1414 ASSERT(k_anoninfo.ani_max >= k_anoninfo.ani_phys_resv);
1408 1415
1409 1416 mutex_enter(&freemem_lock);
1410 1417 if (((k_anoninfo.ani_max - k_anoninfo.ani_phys_resv) +
1411 1418 MAX((spgcnt_t)(availrmem - swapfs_minfree), 0)) < pages) {
1412 1419 mutex_exit(&freemem_lock);
1413 1420 mutex_exit(&anoninfo_lock);
1414 1421 error = ENOMEM;
1415 1422 cmn_err(CE_WARN, "swapdel - too few free pages");
1416 1423 mutex_exit(&swapinfo_lock);
1417 1424 goto out;
1418 1425 }
1419 1426 mutex_exit(&freemem_lock);
1420 1427
1421 1428 k_anoninfo.ani_max -= pages;
1422 1429
1423 1430 /* If needed, reserve memory swap to replace old device */
1424 1431 if (k_anoninfo.ani_phys_resv > k_anoninfo.ani_max) {
1425 1432 adjust_swap = k_anoninfo.ani_phys_resv - k_anoninfo.ani_max;
1426 1433 k_anoninfo.ani_phys_resv -= adjust_swap;
1427 1434 k_anoninfo.ani_mem_resv += adjust_swap;
1428 1435 mutex_enter(&freemem_lock);
1429 1436 availrmem -= adjust_swap;
1430 1437 mutex_exit(&freemem_lock);
1431 1438 ANI_ADD(adjust_swap);
1432 1439 }
1433 1440 ASSERT(k_anoninfo.ani_mem_resv >= k_anoninfo.ani_locked_swap);
1434 1441 ASSERT(k_anoninfo.ani_max >= k_anoninfo.ani_phys_resv);
1435 1442 mutex_exit(&anoninfo_lock);
1436 1443
1437 1444 ANI_ADD(-pages);
1438 1445
1439 1446 /*
1440 1447 * Set the delete flag. This prevents anyone from allocating more
1441 1448 * pages from this file. Also set ST_DOINGDEL. Someone who wants to
1442 1449 * add the file back while we're deleting it will signify by clearing
1443 1450 * this flag.
1444 1451 */
1445 1452 osip->si_flags |= ST_INDEL|ST_DOINGDEL;
1446 1453 mutex_exit(&swapinfo_lock);
1447 1454
1448 1455 /*
1449 1456 * Free all the allocated physical slots for this file. We do this
1450 1457 * by walking through the entire anon hash array, because we need
1451 1458 * to update all the anon slots that have physical swap slots on
1452 1459 * this file, and this is the only way to find them all. We go back
1453 1460 * to the beginning of a bucket after each slot is freed because the
1454 1461 * anonhash_lock is not held during the free and thus the hash table
1455 1462 * may change under us.
1456 1463 */
1457 1464 for (app = anon_hash; app < &anon_hash[ANON_HASH_SIZE]; app++) {
1458 1465 ahm = &anonhash_lock[(app - anon_hash) &
1459 1466 (AH_LOCK_SIZE - 1)].pad_mutex;
1460 1467 mutex_enter(ahm);
1461 1468 top:
1462 1469 for (ap = *app; ap != NULL; ap = ap->an_hash) {
1463 1470 if (ap->an_pvp == cvp &&
1464 1471 ap->an_poff >= osip->si_soff &&
1465 1472 ap->an_poff < osip->si_eoff) {
1466 1473 ASSERT(TESTBIT(osip->si_swapslots,
1467 1474 btop((size_t)(ap->an_poff -
1468 1475 osip->si_soff))));
1469 1476 tvp = ap->an_vp;
1470 1477 toff = ap->an_off;
1471 1478 VN_HOLD(tvp);
1472 1479 mutex_exit(ahm);
1473 1480
1474 1481 error = swapslot_free(tvp, toff, osip);
1475 1482
1476 1483 VN_RELE(tvp);
1477 1484 mutex_enter(ahm);
1478 1485 if (!error && (osip->si_flags & ST_DOINGDEL)) {
1479 1486 goto top;
1480 1487 } else {
1481 1488 if (error) {
1482 1489 cmn_err(CE_WARN,
1483 1490 "swapslot_free failed %d",
1484 1491 error);
1485 1492 }
1486 1493
1487 1494 /*
1488 1495 * Add device back before making it
1489 1496 * visible.
1490 1497 */
1491 1498 mutex_enter(&swapinfo_lock);
1492 1499 osip->si_flags &=
1493 1500 ~(ST_INDEL | ST_DOINGDEL);
1494 1501 mutex_exit(&swapinfo_lock);
1495 1502
1496 1503 /*
1497 1504 * Update the anon space available
1498 1505 */
1499 1506 mutex_enter(&anoninfo_lock);
1500 1507
1501 1508 k_anoninfo.ani_phys_resv += adjust_swap;
1502 1509 k_anoninfo.ani_mem_resv -= adjust_swap;
1503 1510 k_anoninfo.ani_max += pages;
1504 1511
1505 1512 mutex_enter(&freemem_lock);
1506 1513 availrmem += adjust_swap;
1507 1514 mutex_exit(&freemem_lock);
1508 1515
1509 1516 mutex_exit(&anoninfo_lock);
1510 1517
1511 1518 ANI_ADD(pages);
1512 1519
1513 1520 mutex_exit(ahm);
1514 1521 goto out;
1515 1522 }
1516 1523 }
1517 1524 }
1518 1525 mutex_exit(ahm);
1519 1526 }
1520 1527
1521 1528 /* All done, they'd better all be free! */
1522 1529 mutex_enter(&swapinfo_lock);
1523 1530 ASSERT(osip->si_nfpgs == osip->si_npgs);
1524 1531
1525 1532 /* Now remove it from the swapinfo list */
1526 1533 for (sipp = &swapinfo; *sipp != NULL; sipp = &(*sipp)->si_next) {
1527 1534 if (*sipp == osip)
1528 1535 break;
1529 1536 }
1530 1537 ASSERT(*sipp);
1531 1538 *sipp = osip->si_next;
1532 1539 if (silast == osip)
1533 1540 if ((silast = osip->si_next) == NULL)
1534 1541 silast = swapinfo;
1535 1542 nswapfiles--;
1536 1543 mutex_exit(&swapinfo_lock);
1537 1544
1538 1545 kmem_free(osip->si_swapslots, osip->si_mapsize);
1539 1546 kmem_free(osip->si_pname, osip->si_pnamelen);
1540 1547 kmem_free(osip, sizeof (*osip));
1541 1548
1542 1549 mutex_enter(&dump_lock);
1543 1550 if (cvp == dumpvp)
1544 1551 dumpfini();
1545 1552 mutex_exit(&dump_lock);
1546 1553
1547 1554 /* Release the vnode */
1548 1555
1549 1556 mutex_enter(&swap_lock);
1550 1557 (void) VOP_CLOSE(cvp, FREAD|FWRITE, 1, (offset_t)0, CRED(), NULL);
1551 1558 mutex_enter(&cvp->v_lock);
1552 1559 cvp->v_flag &= ~VISSWAP;
1553 1560 mutex_exit(&cvp->v_lock);
1554 1561 VN_RELE(cvp);
1555 1562 mutex_exit(&swap_lock);
1556 1563 out:
1557 1564 return (error);
1558 1565 }
1559 1566
1560 1567 /*
1561 1568 * Free up a physical swap slot on swapinfo sip, currently in use by the
1562 1569 * anonymous page whose name is (vp, off).
1563 1570 */
1564 1571 static int
1565 1572 swapslot_free(
1566 1573 struct vnode *vp,
1567 1574 u_offset_t off,
1568 1575 struct swapinfo *sip)
1569 1576 {
1570 1577 struct page *pp = NULL;
1571 1578 struct anon *ap = NULL;
1572 1579 int error = 0;
1573 1580 kmutex_t *ahm;
1574 1581 struct vnode *pvp = NULL;
1575 1582 u_offset_t poff;
1576 1583 int alloc_pg = 0;
1577 1584
1578 1585 ASSERT(sip->si_vp != NULL);
1579 1586 /*
1580 1587 * Get the page for the old swap slot if it exists or create a new one.
1581 1588 */
1582 1589 again:
1583 1590 if ((pp = page_lookup(vp, off, SE_SHARED)) == NULL) {
1584 1591 pp = page_create_va(vp, off, PAGESIZE, PG_WAIT | PG_EXCL,
1585 1592 segkmap, NULL);
1586 1593 if (pp == NULL)
1587 1594 goto again;
1588 1595 alloc_pg = 1;
1589 1596
1590 1597 error = swap_getphysname(vp, off, &pvp, &poff);
1591 1598 if (error || pvp != sip->si_vp || poff < sip->si_soff ||
1592 1599 poff >= sip->si_eoff) {
1593 1600 page_io_unlock(pp);
1594 1601 /*LINTED: constant in conditional context*/
1595 1602 VN_DISPOSE(pp, B_INVAL, 0, kcred);
1596 1603 return (0);
1597 1604 }
1598 1605
1599 1606 error = VOP_PAGEIO(pvp, pp, poff, PAGESIZE, B_READ,
1600 1607 CRED(), NULL);
1601 1608 if (error) {
1602 1609 page_io_unlock(pp);
1603 1610 if (error == EFAULT)
1604 1611 error = 0;
1605 1612 /*LINTED: constant in conditional context*/
1606 1613 VN_DISPOSE(pp, B_INVAL, 0, kcred);
1607 1614 return (error);
1608 1615 }
1609 1616 }
1610 1617
1611 1618 /*
1612 1619 * The anon could have been removed by anon_decref* and/or reallocated
1613 1620 * by anon layer (an_pvp == NULL) with the same vp, off.
1614 1621 * In this case the page which has been allocated needs to
1615 1622 * be freed.
1616 1623 */
1617 1624 if (!alloc_pg)
1618 1625 page_io_lock(pp);
1619 1626 ahm = AH_MUTEX(vp, off);
1620 1627 mutex_enter(ahm);
1621 1628 ap = swap_anon(vp, off);
1622 1629 if ((ap == NULL || ap->an_pvp == NULL) && alloc_pg) {
1623 1630 mutex_exit(ahm);
1624 1631 page_io_unlock(pp);
1625 1632 /*LINTED: constant in conditional context*/
1626 1633 VN_DISPOSE(pp, B_INVAL, 0, kcred);
1627 1634 return (0);
1628 1635 }
1629 1636
1630 1637 /*
1631 1638 * Free the physical slot. It may have been freed up and replaced with
1632 1639 * another one while we were getting the page so we have to re-verify
1633 1640 * that this is really one we want. If we do free the slot we have
1634 1641 * to mark the page modified, as its backing store is now gone.
1635 1642 */
1636 1643 if ((ap != NULL) && (ap->an_pvp == sip->si_vp && ap->an_poff >=
1637 1644 sip->si_soff && ap->an_poff < sip->si_eoff)) {
1638 1645 swap_phys_free(ap->an_pvp, ap->an_poff, PAGESIZE);
1639 1646 ap->an_pvp = NULL;
1640 1647 ap->an_poff = 0;
1641 1648 mutex_exit(ahm);
1642 1649 hat_setmod(pp);
1643 1650 } else {
1644 1651 mutex_exit(ahm);
1645 1652 }
1646 1653 page_io_unlock(pp);
1647 1654 page_unlock(pp);
1648 1655 return (0);
1649 1656 }
1650 1657
1651 1658
1652 1659 /*
1653 1660 * Get contig physical backing store for vp, in the range
1654 1661 * [*offp, *offp + *lenp), May back a subrange of this, but must
1655 1662 * always include the requested offset or fail. Returns the offsets
1656 1663 * backed as [*offp, *offp + *lenp) and the physical offsets used to
1657 1664 * back them from *pvpp in the range [*pstartp, *pstartp + *lenp).
1658 1665 * Returns 0 for success
1659 1666 * SE_NOANON -- no anon slot for requested page
1660 1667 * SE_NOSWAP -- no physical swap space available
1668 + * SE_NODEV -- no swap devices on this system
1661 1669 */
1662 1670 int
1663 1671 swap_newphysname(
1664 1672 struct vnode *vp,
1665 1673 u_offset_t offset,
1666 1674 u_offset_t *offp,
1667 1675 size_t *lenp,
1668 1676 struct vnode **pvpp,
1669 1677 u_offset_t *poffp)
1670 1678 {
1671 1679 struct anon *ap = NULL; /* anon slot for vp, off */
1672 1680 int error = 0;
1673 1681 struct vnode *pvp;
1674 1682 u_offset_t poff, pstart, prem;
1675 1683 size_t plen;
1676 1684 u_offset_t off, start;
1677 1685 kmutex_t *ahm;
1678 1686
1679 1687 ASSERT(*offp <= offset && offset < *offp + *lenp);
1680 1688
1681 1689 /* Get new physical swap slots. */
1682 1690 plen = *lenp;
1683 - if (!swap_phys_alloc(&pvp, &pstart, &plen, 0)) {
1691 + error = swap_phys_alloc(&pvp, &pstart, &plen, 0);
1692 + if (error != 1) {
1684 1693 /*
1685 1694 * No swap available so return error unless requested
1686 1695 * offset is already backed in which case return that.
1687 1696 */
1688 1697 ahm = AH_MUTEX(vp, offset);
1689 1698 mutex_enter(ahm);
1690 1699 if ((ap = swap_anon(vp, offset)) == NULL) {
1691 1700 error = SE_NOANON;
1692 1701 mutex_exit(ahm);
1693 1702 return (error);
1694 1703 }
1695 - error = (ap->an_pvp ? 0 : SE_NOSWAP);
1704 + error = (ap->an_pvp ? 0 : (error == 0) ? SE_NOSWAP : SE_NODEV);
1696 1705 *offp = offset;
1697 1706 *lenp = PAGESIZE;
1698 1707 *pvpp = ap->an_pvp;
1699 1708 *poffp = ap->an_poff;
1700 1709 mutex_exit(ahm);
1701 1710 return (error);
1702 1711 }
1703 1712
1704 1713 /*
1705 1714 * We got plen (<= *lenp) contig slots. Use these to back a
1706 1715 * subrange of [*offp, *offp + *lenp) which includes offset.
1707 1716 * For now we just put offset at the end of the kluster.
1708 1717 * Clearly there are other possible choices - which is best?
1709 1718 */
1710 1719 start = MAX(*offp,
1711 1720 (offset + PAGESIZE > plen) ? (offset + PAGESIZE - plen) : 0);
1712 1721 ASSERT(start + plen <= *offp + *lenp);
1713 1722
1714 1723 for (off = start, poff = pstart; poff < pstart + plen;
1715 1724 off += PAGESIZE, poff += PAGESIZE) {
1716 1725 ahm = AH_MUTEX(vp, off);
1717 1726 mutex_enter(ahm);
1718 1727 if ((ap = swap_anon(vp, off)) != NULL) {
1719 1728 /* Free old slot if any, and assign new one */
1720 1729 if (ap->an_pvp)
1721 1730 swap_phys_free(ap->an_pvp, ap->an_poff,
1722 1731 PAGESIZE);
1723 1732 ap->an_pvp = pvp;
1724 1733 ap->an_poff = poff;
1725 1734 } else { /* No anon slot for a klustered page, quit. */
1726 1735 prem = (pstart + plen) - poff;
1727 1736 /* Already did requested page, do partial kluster */
1728 1737 if (off > offset) {
1729 1738 plen = poff - pstart;
1730 1739 error = 0;
1731 1740 /* Fail on requested page, error */
1732 1741 } else if (off == offset) {
1733 1742 error = SE_NOANON;
1734 1743 /* Fail on prior page, fail on requested page, error */
1735 1744 } else if ((ap = swap_anon(vp, offset)) == NULL) {
1736 1745 error = SE_NOANON;
1737 1746 /* Fail on prior page, got requested page, do only it */
1738 1747 } else {
1739 1748 /* Free old slot if any, and assign new one */
1740 1749 if (ap->an_pvp)
1741 1750 swap_phys_free(ap->an_pvp, ap->an_poff,
1742 1751 PAGESIZE);
1743 1752 ap->an_pvp = pvp;
1744 1753 ap->an_poff = poff;
1745 1754 /* One page kluster */
1746 1755 start = offset;
1747 1756 plen = PAGESIZE;
1748 1757 pstart = poff;
1749 1758 poff += PAGESIZE;
1750 1759 prem -= PAGESIZE;
1751 1760 }
1752 1761 /* Free unassigned slots */
1753 1762 swap_phys_free(pvp, poff, prem);
1754 1763 mutex_exit(ahm);
1755 1764 break;
1756 1765 }
1757 1766 mutex_exit(ahm);
1758 1767 }
1759 1768 ASSERT(*offp <= start && start + plen <= *offp + *lenp);
1760 1769 ASSERT(start <= offset && offset < start + plen);
1761 1770 *offp = start;
1762 1771 *lenp = plen;
1763 1772 *pvpp = pvp;
1764 1773 *poffp = pstart;
1765 1774 return (error);
1766 1775 }
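A caller of swap_newphysname() can now tell all of the documented failure modes apart. A hedged sketch (the function name is hypothetical; SE_NODEV is assumed to be defined alongside SE_NOANON and SE_NOSWAP in <sys/fs/swapnode.h> as part of this change):

/* Hypothetical consumer reacting to the swap_newphysname() return codes. */
static void
name_backing_sketch(struct vnode *vp, u_offset_t off)
{
	struct vnode *pvp;
	u_offset_t poff, aoff = off;
	size_t alen = PAGESIZE;

	switch (swap_newphysname(vp, off, &aoff, &alen, &pvp, &poff)) {
	case 0:			/* page now backed at (pvp, poff) */
		break;
	case SE_NOANON:		/* no anon slot for the requested page */
		break;
	case SE_NOSWAP:		/* devices configured, but all are full */
		break;
	case SE_NODEV:		/* no swap devices on this system */
		break;
	}
}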
1767 1776
1768 1777
1769 1778 /*
1770 1779 * Get the physical swap backing store location for a given anonymous page
1771 1780 * named (vp, off). The backing store name is returned in (*pvpp, *poffp).
1772 1781 * Returns 0 for success
1773 1782 * EIDRM -- no anon slot (page is not allocated)
1774 1783 */
1775 1784 int
1776 1785 swap_getphysname(
1777 1786 struct vnode *vp,
1778 1787 u_offset_t off,
1779 1788 struct vnode **pvpp,
1780 1789 u_offset_t *poffp)
1781 1790 {
1782 1791 struct anon *ap;
1783 1792 int error = 0;
1784 1793 kmutex_t *ahm;
1785 1794
1786 1795 ahm = AH_MUTEX(vp, off);
1787 1796 mutex_enter(ahm);
1788 1797
1789 1798 /* Get anon slot for vp, off */
1790 1799 ap = swap_anon(vp, off);
1791 1800 if (ap == NULL) {
1792 1801 error = EIDRM;
1793 1802 goto out;
1794 1803 }
1795 1804 *pvpp = ap->an_pvp;
1796 1805 *poffp = ap->an_poff;
1797 1806 out:
1798 1807 mutex_exit(ahm);
1799 1808 return (error);
1800 1809 }