/*
 * CDDL HEADER START
 *
 * The contents of this file are subject to the terms of the
 * Common Development and Distribution License (the "License").
 * You may not use this file except in compliance with the License.
 *
 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
 * or http://www.opensolaris.org/os/licensing.
 * See the License for the specific language governing permissions
 * and limitations under the License.
 *
 * When distributing Covered Code, include this CDDL HEADER in each
 * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
 * If applicable, add the following below this CDDL HEADER, with the
 * fields enclosed by brackets "[]" replaced with your own identifying
 * information: Portions Copyright [yyyy] [name of copyright owner]
 *
 * CDDL HEADER END
 */
/*
 * Copyright (c) 1987, 2010, Oracle and/or its affiliates. All rights reserved.
 * Copyright 2015, OmniTI Computer Consulting, Inc. All rights reserved.
 */

/*	Copyright (c) 1984, 1986, 1987, 1988, 1989 AT&T	*/
/*	  All Rights Reserved	*/

/*
 * University Copyright- Copyright (c) 1982, 1986, 1988
 * The Regents of the University of California
 * All Rights Reserved
 *
 * University Acknowledgment- Portions of this document are derived from
 * software developed by the University of California, Berkeley, and its
 * contributors.
 */

/*
 * Each physical swap area has an associated bitmap representing
 * its physical storage. The bitmap records which swap slots are
 * currently allocated or freed. Allocation is done by searching
 * through the bitmap for the first free slot. Thus, there's
 * no linear relation between offset within the swap device and the
 * address (within its segment(s)) of the page that the slot backs;
 * instead, it's an arbitrary one-to-one mapping.
 *
 * Associated with each swap area is a swapinfo structure. These
 * structures are linked into a linear list that determines the
 * ordering of swap areas in the logical swap device. Each contains a
 * pointer to the corresponding bitmap, the area's size, and its
 * associated vnode.
 */
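
/*
 * Illustrative sketch (commentary only, not part of the kernel source):
 * the slot <-> device-offset correspondence described above.  A slot
 * index found in the bitmap maps to a byte offset on the device as
 * si_soff + ptob(slot), and back with btop(off - si_soff); it is the
 * anon slot, not this bitmap, that remembers which page the offset backs.
 *
 *	u_offset_t
 *	slot_to_off(struct swapinfo *sip, size_t slot)
 *	{
 *		return (sip->si_soff + ptob(slot));
 *	}
 *
 *	size_t
 *	off_to_slot(struct swapinfo *sip, u_offset_t off)
 *	{
 *		return (btop(off - sip->si_soff));
 *	}
 */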

#include <sys/types.h>
#include <sys/inttypes.h>
#include <sys/param.h>
#include <sys/t_lock.h>
#include <sys/sysmacros.h>
#include <sys/systm.h>
#include <sys/errno.h>
#include <sys/kmem.h>
#include <sys/vfs.h>
#include <sys/vnode.h>
#include <sys/pathname.h>
#include <sys/cmn_err.h>
#include <sys/vtrace.h>
#include <sys/swap.h>
#include <sys/dumphdr.h>
#include <sys/debug.h>
#include <sys/fs/snode.h>
#include <sys/fs/swapnode.h>
#include <sys/policy.h>
#include <sys/zone.h>

#include <vm/as.h>
#include <vm/seg.h>
#include <vm/page.h>
#include <vm/seg_vn.h>
#include <vm/hat.h>
#include <vm/anon.h>
#include <vm/seg_map.h>

/*
 * To balance the load among multiple swap areas, we don't allow
 * more than swap_maxcontig allocations to be satisfied from a
 * single swap area before moving on to the next swap area. This
 * effectively "interleaves" allocations among the many swap areas.
 */
int swap_maxcontig;	/* set by anon_init() to 1 MB */
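
/*
 * For example (illustrative numbers only): with 4 KB pages and a
 * swap_maxcontig equivalent to 1 MB, i.e. 256 pages, allocations are
 * satisfied from one swap area until its si_allocs count reaches 256;
 * silast then advances to the next area on the list and the count
 * restarts, spreading the load round-robin across the areas.
 */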

#define	MINIROOTSIZE	12000	/* ~6 Meg XXX */

/*
 * XXX - this lock is a kludge. It serializes some aspects of swapadd() and
 * swapdel() (namely VOP_OPEN, VOP_CLOSE, VN_RELE).  It protects against
 * somebody swapadd'ing and getting swap slots from a vnode, while someone
 * else is in the process of closing or rele'ing it.
 */
static kmutex_t swap_lock;

kmutex_t swapinfo_lock;

/*
 * protected by the swapinfo_lock
 */
struct swapinfo *swapinfo;

static struct swapinfo *silast;
static int nswapfiles;

static u_offset_t	swap_getoff(struct swapinfo *);
static int	swapadd(struct vnode *, ulong_t, ulong_t, char *);
static int	swapdel(struct vnode *, ulong_t);
static int	swapslot_free(struct vnode *, u_offset_t, struct swapinfo *);

/*
 * swap device bitmap allocation macros
 */
#define	MAPSHIFT	5
#define	NBBW		(NBPW * NBBY)	/* number of bits per word */
#define	TESTBIT(map, i)		(((map)[(i) >> MAPSHIFT] & (1 << (i) % NBBW)))
#define	SETBIT(map, i)		(((map)[(i) >> MAPSHIFT] |= (1 << (i) % NBBW)))
#define	CLEARBIT(map, i)	(((map)[(i) >> MAPSHIFT] &= ~(1 << (i) % NBBW)))
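
/*
 * Worked example (illustrative): for slot i = 37 with NBBW = 32,
 * (i) >> MAPSHIFT = 1 selects map[1] and (i) % NBBW = 5 selects bit 5
 * within that word, so:
 *
 *	SETBIT(map, 37);		map[1] |= (1 << 5)
 *	TESTBIT(map, 37);		map[1] & (1 << 5)
 *	CLEARBIT(map, 37);		map[1] &= ~(1 << 5)
 *
 * A set bit means the slot is allocated; a clear bit means it is free.
 */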

int swap_debug = 0;	/* set for debug printf's */
int swap_verify = 0;	/* set to verify slots when freeing and allocating */

uint_t swapalloc_maxcontig;

/*
 * Allocate a range of up to *lenp contiguous slots (pages) from a physical
 * swap device. Flags are one of:
 *	SA_NOT	Must have a slot from a physical swap device other than
 *		the one containing input (*vpp, *offp).
 * Fewer slots than requested may be returned. *lenp allocated slots are
 * returned starting at *offp on *vpp.
 * Returns 1 for a successful allocation, 0 for couldn't allocate any slots,
 * and -1 when there are no swap devices on this system.
 */
int
swap_phys_alloc(
	struct vnode **vpp,
	u_offset_t *offp,
	size_t *lenp,
	uint_t flags)
{
	struct swapinfo *sip;
	offset_t soff, noff;
	size_t len;

	mutex_enter(&swapinfo_lock);
	if (swapinfo == NULL) {
		/* NO SWAP DEVICES on this system currently. */
		mutex_exit(&swapinfo_lock);
		return (-1);
	}
	sip = silast;

	/* Find a desirable physical device and allocate from it. */
	do {
		if (sip == NULL)
			break;
		if (!(sip->si_flags & ST_INDEL) &&
		    (spgcnt_t)sip->si_nfpgs > 0) {
			/* Caller wants other than specified swap device */
			if (flags & SA_NOT) {
				if (*vpp != sip->si_vp ||
				    *offp < sip->si_soff ||
				    *offp >= sip->si_eoff)
					goto found;
			/* Caller is loose, will take anything */
			} else
				goto found;
		} else if (sip->si_nfpgs == 0)
			sip->si_allocs = 0;
		if ((sip = sip->si_next) == NULL)
			sip = swapinfo;
	} while (sip != silast);
	mutex_exit(&swapinfo_lock);
	return (0);
found:
	soff = swap_getoff(sip);
	sip->si_nfpgs--;
	if (soff == -1)
		panic("swap_alloc: swap_getoff failed!");

	for (len = PAGESIZE; len < *lenp; len += PAGESIZE) {
		if (sip->si_nfpgs == 0)
			break;
		if (swapalloc_maxcontig && len >= swapalloc_maxcontig)
			break;
		noff = swap_getoff(sip);
		if (noff == -1) {
			break;
		} else if (noff != soff + len) {
			CLEARBIT(sip->si_swapslots, btop(noff - sip->si_soff));
			break;
		}
		sip->si_nfpgs--;
	}
	*vpp = sip->si_vp;
	*offp = soff;
	*lenp = len;
	ASSERT((spgcnt_t)sip->si_nfpgs >= 0);
	sip->si_allocs += btop(len);
	if (sip->si_allocs >= swap_maxcontig) {
		sip->si_allocs = 0;
		if ((silast = sip->si_next) == NULL)
			silast = swapinfo;
	}
	TRACE_2(TR_FAC_VM, TR_SWAP_ALLOC,
	    "swap_alloc:sip %p offset %lx", sip, soff);
	mutex_exit(&swapinfo_lock);
	return (1);
}
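
/*
 * Hedged usage sketch (hypothetical caller, not taken from this file):
 * requesting a kluster of up to four contiguous slots; a caller that
 * must avoid the device backing an existing (vp, off) pair would pass
 * SA_NOT with that pair filled in instead of the zeros below.
 *
 *	struct vnode *pvp = NULL;
 *	u_offset_t poff = 0;
 *	size_t plen = 4 * PAGESIZE;
 *
 *	switch (swap_phys_alloc(&pvp, &poff, &plen, 0)) {
 *	case 1:		// got plen <= 4 * PAGESIZE bytes at (pvp, poff)
 *		break;
 *	case 0:		// devices exist, but no slots could be allocated
 *		break;
 *	case -1:	// no swap devices configured on the system
 *		break;
 *	}
 */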

int swap_backsearch = 0;

/*
 * Get a free offset on swap device sip.
 * Return the offset (>= 0) on success, or -1 on failure.
 */
static u_offset_t
swap_getoff(struct swapinfo *sip)
{
	uint_t *sp, *ep;
	size_t aoff, boff, poff, slotnumber;

	ASSERT(MUTEX_HELD(&swapinfo_lock));

	sip->si_alloccnt++;
	for (sp = &sip->si_swapslots[sip->si_hint >> MAPSHIFT],
	    ep = &sip->si_swapslots[sip->si_mapsize / NBPW]; sp < ep; sp++) {
		if (*sp != (uint_t)0xffffffff)
			goto foundentry;
		else
			sip->si_checkcnt++;
	}
	SWAP_PRINT(SW_ALLOC,
	    "swap_getoff: couldn't find slot from hint %ld to end\n",
	    sip->si_hint, 0, 0, 0, 0);
	/*
	 * Go backwards? Check for faster method XXX
	 */
	if (swap_backsearch) {
		for (sp = &sip->si_swapslots[sip->si_hint >> MAPSHIFT],
		    ep = sip->si_swapslots; sp > ep; sp--) {
			if (*sp != (uint_t)0xffffffff)
				goto foundentry;
			else
				sip->si_checkcnt++;
		}
	} else {
		for (sp = sip->si_swapslots,
		    ep = &sip->si_swapslots[sip->si_hint >> MAPSHIFT];
		    sp < ep; sp++) {
			if (*sp != (uint_t)0xffffffff)
				goto foundentry;
			else
				sip->si_checkcnt++;
		}
	}
	if (*sp == 0xffffffff) {
		cmn_err(CE_WARN, "No free swap slots!");
		return ((u_offset_t)-1);
	}

foundentry:
	/*
	 * aoff is the page number offset (in bytes) of the si_swapslots
	 * array element containing a free page
	 *
	 * boff is the page number offset of the free page
	 * (i.e. cleared bit) in si_swapslots[aoff].
	 */
	aoff = ((char *)sp - (char *)sip->si_swapslots) * NBBY;

	for (boff = (sip->si_hint % NBBW); boff < NBBW; boff++) {
		if (!TESTBIT(sip->si_swapslots, aoff + boff))
			goto foundslot;
		else
			sip->si_checkcnt++;
	}
	for (boff = 0; boff < (sip->si_hint % NBBW); boff++) {
		if (!TESTBIT(sip->si_swapslots, aoff + boff))
			goto foundslot;
		else
			sip->si_checkcnt++;
	}
	panic("swap_getoff: didn't find slot in word hint %ld", sip->si_hint);

foundslot:
	/*
	 * Return the offset of the free page in the swap device.
	 * Convert the page number to a byte offset and add the
	 * starting offset of the swap device.
	 */
	slotnumber = aoff + boff;
	SWAP_PRINT(SW_ALLOC, "swap_getoff: allocating slot %ld\n",
	    slotnumber, 0, 0, 0, 0);
	poff = ptob(slotnumber);
	if (poff + sip->si_soff >= sip->si_eoff)
		printf("ptob(aoff(%ld) + boff(%ld))(%ld) >= eoff(%ld)\n",
		    aoff, boff, ptob(slotnumber), (long)sip->si_eoff);
	ASSERT(poff < sip->si_eoff);
	/*
	 * We could verify here that the slot isn't already allocated
	 * by looking through all the anon slots.
	 */
	SETBIT(sip->si_swapslots, slotnumber);
	sip->si_hint = slotnumber + 1;	/* hint = next slot */
	return (poff + sip->si_soff);
}
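
/*
 * Worked example (illustrative): with si_hint = 37 and NBBW = 32, the
 * word scan starts at si_swapslots[37 >> MAPSHIFT] = si_swapslots[1].
 * If that word has a clear bit, aoff = (4 bytes into the map) * NBBY = 32,
 * the bit scan starts at boff = 37 % 32 = 5, and if bit 5 is clear the
 * function returns sip->si_soff + ptob(37) after setting the bit and
 * advancing si_hint to 38.
 */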

/*
 * Free a swap page.
 */
void
swap_phys_free(struct vnode *vp, u_offset_t off, size_t len)
{
	struct swapinfo *sip;
	ssize_t pagenumber, npage;

	mutex_enter(&swapinfo_lock);
	sip = swapinfo;

	do {
		if (sip->si_vp == vp &&
		    sip->si_soff <= off && off < sip->si_eoff) {
			for (pagenumber = btop(off - sip->si_soff),
			    npage = btop(len) + pagenumber;
			    pagenumber < npage; pagenumber++) {
				SWAP_PRINT(SW_ALLOC,
				    "swap_phys_free: freeing slot %ld on "
				    "sip %p\n",
				    pagenumber, sip, 0, 0, 0);
				if (!TESTBIT(sip->si_swapslots, pagenumber)) {
					panic(
					    "swap_phys_free: freeing free slot "
					    "%p,%lx\n", (void *)vp,
					    ptob(pagenumber) + sip->si_soff);
				}
				CLEARBIT(sip->si_swapslots, pagenumber);
				sip->si_nfpgs++;
			}
			ASSERT(sip->si_nfpgs <= sip->si_npgs);
			mutex_exit(&swapinfo_lock);
			return;
		}
	} while ((sip = sip->si_next) != NULL);
	panic("swap_phys_free");
	/*NOTREACHED*/
}

/*
 * Return the anon struct corresponding to the given
 * <vnode, off> if it is part of the virtual swap device.
 * Return the anon struct if found, otherwise NULL.
 */
struct anon *
swap_anon(struct vnode *vp, u_offset_t off)
{
	struct anon *ap;

	ASSERT(MUTEX_HELD(AH_MUTEX(vp, off)));

	for (ap = anon_hash[ANON_HASH(vp, off)]; ap != NULL; ap = ap->an_hash) {
		if (ap->an_vp == vp && ap->an_off == off)
			return (ap);
	}
	return (NULL);
}
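
/*
 * Hedged usage sketch (mirrors the callers later in this file): the
 * appropriate anon hash bucket mutex must be held across the lookup
 * and across any use of the returned anon slot's fields.
 *
 *	kmutex_t *ahm = AH_MUTEX(vp, off);
 *	struct anon *ap;
 *
 *	mutex_enter(ahm);
 *	if ((ap = swap_anon(vp, off)) != NULL) {
 *		// ap->an_pvp, ap->an_poff are stable while ahm is held
 *	}
 *	mutex_exit(ahm);
 */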


/*
 * Determine if the vp offset range overlaps a swap device.
 */
int
swap_in_range(struct vnode *vp, u_offset_t offset, size_t len)
{
	struct swapinfo *sip;
	u_offset_t eoff;

	eoff = offset + len;
	ASSERT(eoff > offset);

	mutex_enter(&swapinfo_lock);
	sip = swapinfo;
	if (vp && sip) {
		do {
			if (vp != sip->si_vp || eoff <= sip->si_soff ||
			    offset >= sip->si_eoff)
				continue;
			mutex_exit(&swapinfo_lock);
			return (1);
		} while ((sip = sip->si_next) != NULL);
	}
	mutex_exit(&swapinfo_lock);
	return (0);
}

/*
 * See if name is one of our swap files
 * even though lookupname failed.
 * This can be used by swapdel to delete
 * swap resources on remote machines
 * where the link has gone down.
 */
static struct vnode *
swapdel_byname(
	char *name,		/* pathname to delete */
	ulong_t lowblk)		/* Low block number of area to delete */
{
	struct swapinfo **sipp, *osip;
	u_offset_t soff;

	/*
	 * Find the swap file entry for the file to
	 * be deleted. Skip any entries that are in
	 * transition.
	 */

	soff = ptob(btopr(lowblk << SCTRSHFT)); /* must be page aligned */

	mutex_enter(&swapinfo_lock);
	for (sipp = &swapinfo; (osip = *sipp) != NULL; sipp = &osip->si_next) {
		if ((strcmp(osip->si_pname, name) == 0) &&
		    (osip->si_soff == soff) && (osip->si_flags == 0)) {
			struct vnode *vp = osip->si_vp;

			VN_HOLD(vp);
			mutex_exit(&swapinfo_lock);
			return (vp);
		}
	}
	mutex_exit(&swapinfo_lock);
	return (NULL);
}


/*
 * New system call to manipulate swap files.
 */
int
swapctl(int sc_cmd, void *sc_arg, int *rv)
{
	struct swapinfo *sip, *csip, *tsip;
	int error = 0;
	struct swapent st, *ust;
	struct swapres sr;
	struct vnode *vp;
	int cnt = 0;
	int tmp_nswapfiles;
	int nswap;
	int length, nlen;
	int gplen = 0, plen;
	char *swapname;
	char *pname;
	char *tpname;
	struct anoninfo ai;
	spgcnt_t avail;
	int global = INGLOBALZONE(curproc);
	struct zone *zp = curproc->p_zone;

	/*
	 * When running in a zone we want to hide the details of the swap
	 * devices: we report only a single swap device, named "swap", whose
	 * size is equal to the sum of the sizes of all real swap devices on
	 * the system.
	 */
	switch (sc_cmd) {
	case SC_GETNSWP:
		if (global)
			*rv = nswapfiles;
		else
			*rv = 1;
		return (0);

	case SC_AINFO:
		/*
		 * Return anoninfo information with these changes:
		 * ani_max = maximum amount of swap space
		 *	(including potentially available physical memory)
		 * ani_free = amount of unallocated anonymous memory
		 *	(some of which might be reserved and including
		 *	potentially available physical memory)
		 * ani_resv = amount of claimed (reserved) anonymous memory
		 */
		avail = MAX((spgcnt_t)(availrmem - swapfs_minfree), 0);
		ai.ani_max = (k_anoninfo.ani_max +
		    k_anoninfo.ani_mem_resv) + avail;

		/* Update ani_free */
		set_anoninfo();
		ai.ani_free = k_anoninfo.ani_free + avail;

		ai.ani_resv = k_anoninfo.ani_phys_resv +
		    k_anoninfo.ani_mem_resv;

		if (!global && zp->zone_max_swap_ctl != UINT64_MAX) {
			/*
			 * We're in a non-global zone with a swap cap.  We
			 * always report the system-wide values for the global
			 * zone, even though it too can have a swap cap.
			 */

			/*
			 * For a swap-capped zone, the numbers are contrived
			 * since we don't have a correct value of 'reserved'
			 * for the zone.
			 *
			 * The ani_max value is always the zone's swap cap.
			 *
			 * The ani_free value is always the difference between
			 * the cap and the amount of swap in use by the zone.
			 *
			 * The ani_resv value is typically set to be the amount
			 * of swap in use by the zone, but can be adjusted
			 * upwards to indicate how much swap is currently
			 * unavailable to that zone due to usage by entities
			 * outside the zone.
			 *
			 * This works as follows.
			 *
			 * In the 'swap -s' output, the data is displayed
			 * as follows:
			 *    allocated = ani_max  - ani_free
			 *    reserved  = ani_resv - allocated
			 *    available = ani_max  - ani_resv
			 *
			 * Taking a contrived example, if the swap cap is 100
			 * and the amount of swap used by the zone is 75, this
			 * gives:
			 *    allocated = ani_max  - ani_free  = 100 - 25 = 75
			 *    reserved  = ani_resv - allocated =  75 - 75 =  0
			 *    available = ani_max  - ani_resv  = 100 - 75 = 25
			 *
			 * In this typical case, you can see that the 'swap -s'
			 * 'reserved' will always be 0 inside a swap capped
			 * zone.
			 *
			 * However, if the system as a whole has less free
			 * swap than the zone limits allow, then we adjust
			 * the ani_resv value up so that it is the difference
			 * between the zone cap and the amount of free system
			 * swap.  Taking the above example, but when the
			 * system as a whole only has 20 of swap available, we
			 * get an ani_resv of 100 - 20 = 80.  This gives:
			 *    allocated = ani_max  - ani_free  = 100 - 25 = 75
			 *    reserved  = ani_resv - allocated =  80 - 75 =  5
			 *    available = ani_max  - ani_resv  = 100 - 80 = 20
			 *
			 * In this case, you can see how the ani_resv value is
			 * tweaked up to make the 'swap -s' numbers work inside
			 * the zone.
			 */
			rctl_qty_t cap, used;
			pgcnt_t pgcap, sys_avail;

			mutex_enter(&zp->zone_mem_lock);
			cap = zp->zone_max_swap_ctl;
			used = zp->zone_max_swap;
			mutex_exit(&zp->zone_mem_lock);

			pgcap = MIN(btop(cap), ai.ani_max);
			ai.ani_free = pgcap - btop(used);

			/* Get the system-wide swap currently available. */
			sys_avail = ai.ani_max - ai.ani_resv;
			if (sys_avail < ai.ani_free)
				ai.ani_resv = pgcap - sys_avail;
			else
				ai.ani_resv = btop(used);

			ai.ani_max = pgcap;
		}

		if (copyout(&ai, sc_arg, sizeof (struct anoninfo)) != 0)
			return (EFAULT);
		return (0);

	case SC_LIST:
		if (copyin(sc_arg, &length, sizeof (int)) != 0)
			return (EFAULT);
		if (!global) {
			struct swapent st;
			char *swappath = "swap";

			if (length < 1)
				return (ENOMEM);
			ust = (swapent_t *)((swaptbl_t *)sc_arg)->swt_ent;
			if (copyin(ust, &st, sizeof (swapent_t)) != 0)
				return (EFAULT);
			st.ste_start = PAGESIZE >> SCTRSHFT;
			st.ste_length = (off_t)0;
			st.ste_pages = 0;
			st.ste_free = 0;
			st.ste_flags = 0;

			mutex_enter(&swapinfo_lock);
			for (sip = swapinfo, nswap = 0;
			    sip != NULL && nswap < nswapfiles;
			    sip = sip->si_next, nswap++) {
				st.ste_length +=
				    (sip->si_eoff - sip->si_soff) >> SCTRSHFT;
				st.ste_pages += sip->si_npgs;
				st.ste_free += sip->si_nfpgs;
			}
			mutex_exit(&swapinfo_lock);

			if (zp->zone_max_swap_ctl != UINT64_MAX) {
				rctl_qty_t cap, used;

				mutex_enter(&zp->zone_mem_lock);
				cap = zp->zone_max_swap_ctl;
				used = zp->zone_max_swap;
				mutex_exit(&zp->zone_mem_lock);

				st.ste_length = MIN(cap, st.ste_length);
				st.ste_pages = MIN(btop(cap), st.ste_pages);
				st.ste_free = MIN(st.ste_pages - btop(used),
				    st.ste_free);
			}

			if (copyout(&st, ust, sizeof (swapent_t)) != 0 ||
			    copyout(swappath, st.ste_path,
			    strlen(swappath) + 1) != 0) {
				return (EFAULT);
			}
			*rv = 1;
			return (0);
		}
beginning:
		tmp_nswapfiles = nswapfiles;
		/* Return an error if not enough space for the whole table. */
		if (length < tmp_nswapfiles)
			return (ENOMEM);
		/*
		 * Get memory to hold the swap entries and their names.  We'll
		 * copy the real entries into these and then copy these out.
		 * Allocating the pathname memory is only a guess so we may
		 * find that we need more and have to do it again.
		 * All this is because we have to hold the swapinfo lock while
		 * traversing the swapinfo list, and we can't be doing copyouts
		 * and/or kmem_alloc()s during this.
		 */
		csip = kmem_zalloc(tmp_nswapfiles * sizeof (struct swapinfo),
		    KM_SLEEP);
retry:
		nlen = tmp_nswapfiles * (gplen += 100);
		pname = kmem_zalloc(nlen, KM_SLEEP);

		mutex_enter(&swapinfo_lock);

		if (tmp_nswapfiles != nswapfiles) {
			mutex_exit(&swapinfo_lock);
			kmem_free(pname, nlen);
			kmem_free(csip,
			    tmp_nswapfiles * sizeof (struct swapinfo));
			gplen = 0;
			goto beginning;
		}
		for (sip = swapinfo, tsip = csip, tpname = pname, nswap = 0;
		    sip && nswap < tmp_nswapfiles;
		    sip = sip->si_next, tsip++, tpname += plen, nswap++) {
			plen = sip->si_pnamelen;
			if (tpname + plen - pname > nlen) {
				mutex_exit(&swapinfo_lock);
				kmem_free(pname, nlen);
				goto retry;
			}
			*tsip = *sip;
			tsip->si_pname = tpname;
			(void) strcpy(tsip->si_pname, sip->si_pname);
		}
		mutex_exit(&swapinfo_lock);

		if (sip) {
			error = ENOMEM;
			goto lout;
		}
		ust = (swapent_t *)((swaptbl_t *)sc_arg)->swt_ent;
		for (tsip = csip, cnt = 0; cnt < nswap; tsip++, ust++, cnt++) {
			if (copyin(ust, &st, sizeof (swapent_t)) != 0) {
				error = EFAULT;
				goto lout;
			}
			st.ste_flags = tsip->si_flags;
			st.ste_length =
			    (tsip->si_eoff - tsip->si_soff) >> SCTRSHFT;
			st.ste_start = tsip->si_soff >> SCTRSHFT;
			st.ste_pages = tsip->si_npgs;
			st.ste_free = tsip->si_nfpgs;
			if (copyout(&st, ust, sizeof (swapent_t)) != 0) {
				error = EFAULT;
				goto lout;
			}
			if (!tsip->si_pnamelen)
				continue;
			if (copyout(tsip->si_pname, st.ste_path,
			    tsip->si_pnamelen) != 0) {
				error = EFAULT;
				goto lout;
			}
		}
		*rv = nswap;
lout:
		kmem_free(csip, tmp_nswapfiles * sizeof (struct swapinfo));
		kmem_free(pname, nlen);
		return (error);

	case SC_ADD:
	case SC_REMOVE:
		break;
	default:
		return (EINVAL);
	}
	if ((error = secpolicy_swapctl(CRED())) != 0)
		return (error);

	if (copyin(sc_arg, &sr, sizeof (swapres_t)))
		return (EFAULT);

	/* Allocate the space to read in pathname */
	if ((swapname = kmem_alloc(MAXPATHLEN, KM_NOSLEEP)) == NULL)
		return (ENOMEM);

	error = copyinstr(sr.sr_name, swapname, MAXPATHLEN, 0);
	if (error)
		goto out;

	error = lookupname(swapname, UIO_SYSSPACE, FOLLOW, NULLVPP, &vp);
	if (error) {
		if (sc_cmd == SC_ADD)
			goto out;
		/* see if we match by name */
		vp = swapdel_byname(swapname, (size_t)sr.sr_start);
		if (vp == NULL)
			goto out;
	}

	if (vp->v_flag & (VNOMAP | VNOSWAP)) {
		VN_RELE(vp);
		error = ENOSYS;
		goto out;
	}
	switch (vp->v_type) {
	case VBLK:
		break;

	case VREG:
		if (vp->v_vfsp && vn_is_readonly(vp))
			error = EROFS;
		else
			error = VOP_ACCESS(vp, VREAD|VWRITE, 0, CRED(), NULL);
		break;

	case VDIR:
		error = EISDIR;
		break;
	default:
		error = ENOSYS;
		break;
	}
	if (error == 0) {
		if (sc_cmd == SC_REMOVE)
			error = swapdel(vp, sr.sr_start);
		else
			error = swapadd(vp, sr.sr_start,
			    sr.sr_length, swapname);
	}
	VN_RELE(vp);
out:
	kmem_free(swapname, MAXPATHLEN);
	return (error);
}
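
/*
 * Hedged userland sketch (not kernel code): the entry point above is
 * reached through swapctl(2).  The path and block numbers below are
 * hypothetical; sr_start and sr_length are in 512-byte blocks, and a
 * zero sr_length means "the rest of the device", per swapadd() below.
 *
 *	#include <sys/stat.h>
 *	#include <sys/swap.h>
 *
 *	swapres_t sr;
 *
 *	sr.sr_name = "/dev/zvol/dsk/rpool/swap";
 *	sr.sr_start = 8;	// first 512-byte block to use
 *	sr.sr_length = 0;	// 0 means use the size of the device
 *	if (swapctl(SC_ADD, &sr) < 0)
 *		perror("SC_ADD");
 */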

#if defined(_LP64) && defined(_SYSCALL32)

int
swapctl32(int sc_cmd, void *sc_arg, int *rv)
{
	struct swapinfo *sip, *csip, *tsip;
	int error = 0;
	struct swapent32 st, *ust;
	struct swapres32 sr;
	struct vnode *vp;
	int cnt = 0;
	int tmp_nswapfiles;
	int nswap;
	int length, nlen;
	int gplen = 0, plen;
	char *swapname;
	char *pname;
	char *tpname;
	struct anoninfo32 ai;
	size_t s;
	spgcnt_t avail;
	int global = INGLOBALZONE(curproc);
	struct zone *zp = curproc->p_zone;

	/*
	 * When running in a zone we want to hide the details of the swap
	 * devices: we report only a single swap device, named "swap", whose
	 * size is equal to the sum of the sizes of all real swap devices on
	 * the system.
	 */
	switch (sc_cmd) {
	case SC_GETNSWP:
		if (global)
			*rv = nswapfiles;
		else
			*rv = 1;
		return (0);

	case SC_AINFO:
		/*
		 * Return anoninfo information with these changes:
		 * ani_max = maximum amount of swap space
		 *	(including potentially available physical memory)
		 * ani_free = amount of unallocated anonymous memory
		 *	(some of which might be reserved and including
		 *	potentially available physical memory)
		 * ani_resv = amount of claimed (reserved) anonymous memory
		 */
		avail = MAX((spgcnt_t)(availrmem - swapfs_minfree), 0);
		s = (k_anoninfo.ani_max + k_anoninfo.ani_mem_resv) + avail;
		if (s > UINT32_MAX)
			return (EOVERFLOW);
		ai.ani_max = s;

		/* Update ani_free */
		set_anoninfo();
		s = k_anoninfo.ani_free + avail;
		if (s > UINT32_MAX)
			return (EOVERFLOW);
		ai.ani_free = s;

		s = k_anoninfo.ani_phys_resv + k_anoninfo.ani_mem_resv;
		if (s > UINT32_MAX)
			return (EOVERFLOW);
		ai.ani_resv = s;

		if (!global && zp->zone_max_swap_ctl != UINT64_MAX) {
			/*
			 * We're in a non-global zone with a swap cap.  We
			 * always report the system-wide values for the global
			 * zone, even though it too can have a swap cap.
			 * See the comment for the SC_AINFO case in swapctl()
			 * which explains the following logic.
			 */
			rctl_qty_t cap, used;
			pgcnt_t pgcap, sys_avail;

			mutex_enter(&zp->zone_mem_lock);
			cap = zp->zone_max_swap_ctl;
			used = zp->zone_max_swap;
			mutex_exit(&zp->zone_mem_lock);

			pgcap = MIN(btop(cap), ai.ani_max);
			ai.ani_free = pgcap - btop(used);

			/* Get the system-wide swap currently available. */
			sys_avail = ai.ani_max - ai.ani_resv;
			if (sys_avail < ai.ani_free)
				ai.ani_resv = pgcap - sys_avail;
			else
				ai.ani_resv = btop(used);

			ai.ani_max = pgcap;
		}

		if (copyout(&ai, sc_arg, sizeof (ai)) != 0)
			return (EFAULT);
		return (0);

	case SC_LIST:
		if (copyin(sc_arg, &length, sizeof (int32_t)) != 0)
			return (EFAULT);
		if (!global) {
			struct swapent32 st;
			char *swappath = "swap";

			if (length < 1)
				return (ENOMEM);
			ust = (swapent32_t *)((swaptbl32_t *)sc_arg)->swt_ent;
			if (copyin(ust, &st, sizeof (swapent32_t)) != 0)
				return (EFAULT);
			st.ste_start = PAGESIZE >> SCTRSHFT;
			st.ste_length = (off_t)0;
			st.ste_pages = 0;
			st.ste_free = 0;
			st.ste_flags = 0;

			mutex_enter(&swapinfo_lock);
			for (sip = swapinfo, nswap = 0;
			    sip != NULL && nswap < nswapfiles;
			    sip = sip->si_next, nswap++) {
				st.ste_length +=
				    (sip->si_eoff - sip->si_soff) >> SCTRSHFT;
				st.ste_pages += sip->si_npgs;
				st.ste_free += sip->si_nfpgs;
			}
			mutex_exit(&swapinfo_lock);

			if (zp->zone_max_swap_ctl != UINT64_MAX) {
				rctl_qty_t cap, used;

				mutex_enter(&zp->zone_mem_lock);
				cap = zp->zone_max_swap_ctl;
				used = zp->zone_max_swap;
				mutex_exit(&zp->zone_mem_lock);

				st.ste_length = MIN(cap, st.ste_length);
				st.ste_pages = MIN(btop(cap), st.ste_pages);
				st.ste_free = MIN(st.ste_pages - btop(used),
				    st.ste_free);
			}

			if (copyout(&st, ust, sizeof (swapent32_t)) != 0 ||
			    copyout(swappath, (caddr_t)(uintptr_t)st.ste_path,
			    strlen(swappath) + 1) != 0) {
				return (EFAULT);
			}
			*rv = 1;
			return (0);
		}
beginning:
		tmp_nswapfiles = nswapfiles;
		/* Return an error if not enough space for the whole table. */
		if (length < tmp_nswapfiles)
			return (ENOMEM);
		/*
		 * Get memory to hold the swap entries and their names.  We'll
		 * copy the real entries into these and then copy these out.
		 * Allocating the pathname memory is only a guess so we may
		 * find that we need more and have to do it again.
		 * All this is because we have to hold the swapinfo lock while
		 * traversing the swapinfo list, and we can't be doing copyouts
		 * and/or kmem_alloc()s during this.
		 */
		csip = kmem_zalloc(tmp_nswapfiles * sizeof (*csip), KM_SLEEP);
retry:
		nlen = tmp_nswapfiles * (gplen += 100);
		pname = kmem_zalloc(nlen, KM_SLEEP);

		mutex_enter(&swapinfo_lock);

		if (tmp_nswapfiles != nswapfiles) {
			mutex_exit(&swapinfo_lock);
			kmem_free(pname, nlen);
			kmem_free(csip, tmp_nswapfiles * sizeof (*csip));
			gplen = 0;
			goto beginning;
		}
		for (sip = swapinfo, tsip = csip, tpname = pname, nswap = 0;
		    (sip != NULL) && (nswap < tmp_nswapfiles);
		    sip = sip->si_next, tsip++, tpname += plen, nswap++) {
			plen = sip->si_pnamelen;
			if (tpname + plen - pname > nlen) {
				mutex_exit(&swapinfo_lock);
				kmem_free(pname, nlen);
				goto retry;
			}
			*tsip = *sip;
			tsip->si_pname = tpname;
			(void) strcpy(tsip->si_pname, sip->si_pname);
		}
		mutex_exit(&swapinfo_lock);

		if (sip != NULL) {
			error = ENOMEM;
			goto lout;
		}
		ust = (swapent32_t *)((swaptbl32_t *)sc_arg)->swt_ent;
		for (tsip = csip, cnt = 0; cnt < nswap; tsip++, ust++, cnt++) {
			if (copyin(ust, &st, sizeof (*ust)) != 0) {
				error = EFAULT;
				goto lout;
			}
			st.ste_flags = tsip->si_flags;
			st.ste_length =
			    (tsip->si_eoff - tsip->si_soff) >> SCTRSHFT;
			st.ste_start = tsip->si_soff >> SCTRSHFT;
			st.ste_pages = tsip->si_npgs;
			st.ste_free = tsip->si_nfpgs;
			if (copyout(&st, ust, sizeof (st)) != 0) {
				error = EFAULT;
				goto lout;
			}
			if (!tsip->si_pnamelen)
				continue;
			if (copyout(tsip->si_pname,
			    (caddr_t)(uintptr_t)st.ste_path,
			    tsip->si_pnamelen) != 0) {
				error = EFAULT;
				goto lout;
			}
		}
		*rv = nswap;
lout:
		kmem_free(csip, tmp_nswapfiles * sizeof (*csip));
		kmem_free(pname, nlen);
		return (error);

	case SC_ADD:
	case SC_REMOVE:
		break;
	default:
		return (EINVAL);
	}
	if ((error = secpolicy_swapctl(CRED())) != 0)
		return (error);

	if (copyin(sc_arg, &sr, sizeof (sr)))
		return (EFAULT);

	/* Allocate the space to read in pathname */
	if ((swapname = kmem_alloc(MAXPATHLEN, KM_NOSLEEP)) == NULL)
		return (ENOMEM);

	error = copyinstr((caddr_t)(uintptr_t)sr.sr_name,
	    swapname, MAXPATHLEN, NULL);
	if (error)
		goto out;

	error = lookupname(swapname, UIO_SYSSPACE, FOLLOW, NULLVPP, &vp);
	if (error) {
		if (sc_cmd == SC_ADD)
			goto out;
		/* see if we match by name */
		vp = swapdel_byname(swapname, (uint_t)sr.sr_start);
		if (vp == NULL)
			goto out;
	}

	if (vp->v_flag & (VNOMAP | VNOSWAP)) {
		VN_RELE(vp);
		error = ENOSYS;
		goto out;
	}
	switch (vp->v_type) {
	case VBLK:
		break;

	case VREG:
		if (vp->v_vfsp && vn_is_readonly(vp))
			error = EROFS;
		else
			error = VOP_ACCESS(vp, VREAD|VWRITE, 0, CRED(), NULL);
		break;

	case VDIR:
		error = EISDIR;
		break;
	default:
		error = ENOSYS;
		break;
	}
	if (error == 0) {
		if (sc_cmd == SC_REMOVE)
			error = swapdel(vp, sr.sr_start);
		else
			error = swapadd(vp, sr.sr_start, sr.sr_length,
			    swapname);
	}
	VN_RELE(vp);
out:
	kmem_free(swapname, MAXPATHLEN);
	return (error);
}

#endif /* _LP64 && _SYSCALL32 */

/*
 * Add a new swap file.
 */
int
swapadd(struct vnode *vp, ulong_t lowblk, ulong_t nblks, char *swapname)
{
	struct swapinfo **sipp, *nsip = NULL, *esip = NULL;
	struct vnode *cvp;
	struct vattr vattr;
	pgcnt_t pages;
	u_offset_t soff, eoff;
	int error;
	ssize_t i, start, end;
	ushort_t wasswap;
	ulong_t startblk;
	size_t returned_mem;

	SWAP_PRINT(SW_CTL, "swapadd: vp %p lowblk %ld nblks %ld swapname %s\n",
	    vp, lowblk, nblks, swapname, 0);
	/*
	 * Get the real vnode. (If vp is not a specnode it just returns vp, so
	 * it does the right thing, but having this code know about specnodes
	 * violates the spirit of having it be independent of vnode type.)
	 */
	cvp = common_specvp(vp);

	/*
	 * OR in VISSWAP so the file system has a chance to deny swap-ons
	 * during open.
	 */
	mutex_enter(&cvp->v_lock);
	wasswap = cvp->v_flag & VISSWAP;
	cvp->v_flag |= VISSWAP;
	mutex_exit(&cvp->v_lock);

	mutex_enter(&swap_lock);
	if (error = VOP_OPEN(&cvp, FREAD|FWRITE, CRED(), NULL)) {
		mutex_exit(&swap_lock);
		/* restore state of v_flag */
		if (!wasswap) {
			mutex_enter(&cvp->v_lock);
			cvp->v_flag &= ~VISSWAP;
			mutex_exit(&cvp->v_lock);
		}
		return (error);
	}
	mutex_exit(&swap_lock);

	/*
	 * Get partition size. Return error if empty partition,
	 * or if request does not fit within the partition.
	 * If this is the first swap device, we can reduce
	 * the size of the swap area to match what is
	 * available. This can happen if the system was built
	 * on a machine with a different size swap partition.
	 */
	vattr.va_mask = AT_SIZE;
	if (error = VOP_GETATTR(cvp, &vattr, ATTR_COMM, CRED(), NULL))
		goto out;

	/*
	 * Specfs returns a va_size of MAXOFFSET_T (UNKNOWN_SIZE) when the
	 * size of the device can't be determined.
	 */
	if ((vattr.va_size == 0) || (vattr.va_size == MAXOFFSET_T)) {
		error = EINVAL;
		goto out;
	}

#ifdef	_ILP32
	/*
	 * No support for large swap in 32-bit OS, if the size of the swap is
	 * bigger than MAXOFF32_T then the size used by swapfs must be limited.
	 * This limitation is imposed by the swap subsystem itself, a D_64BIT
	 * driver as the target of swap operation should be able to field
	 * the IO.
	 */
	if (vattr.va_size > MAXOFF32_T) {
		cmn_err(CE_NOTE,
		    "!swap device %s truncated from 0x%llx to 0x%x bytes",
		    swapname, vattr.va_size, MAXOFF32_T);
		vattr.va_size = MAXOFF32_T;
	}
#endif	/* _ILP32 */

	/* Fail if file not writeable (try to set size to current size) */
	vattr.va_mask = AT_SIZE;
	if (error = VOP_SETATTR(cvp, &vattr, 0, CRED(), NULL))
		goto out;

	/* Fail if fs does not support VOP_PAGEIO */
	error = VOP_PAGEIO(cvp, (page_t *)NULL, (u_offset_t)0, 0, 0, CRED(),
	    NULL);

	if (error == ENOSYS)
		goto out;
	else
		error = 0;
	/*
	 * If swapping on the root filesystem don't put swap blocks that
	 * correspond to the miniroot filesystem on the swap free list.
	 */
	if (cvp == rootdir)
		startblk = roundup(MINIROOTSIZE<<SCTRSHFT, klustsize)>>SCTRSHFT;
	else			/* Skip 1st page (disk label) */
		startblk = (ulong_t)(lowblk ? lowblk : 1);

	soff = startblk << SCTRSHFT;
	if (soff >= vattr.va_size) {
		error = EINVAL;
		goto out;
	}

	/*
	 * If user specified 0 blks, use the size of the device
	 */
	eoff = nblks ? soff + (nblks - (startblk - lowblk) << SCTRSHFT) :
	    vattr.va_size;

	SWAP_PRINT(SW_CTL, "swapadd: va_size %ld soff %ld eoff %ld\n",
	    vattr.va_size, soff, eoff, 0, 0);

	if (eoff > vattr.va_size) {
		error = EINVAL;
		goto out;
	}

	/*
	 * The starting and ending offsets must be page aligned.
	 * Round soff up to next page boundary, round eoff
	 * down to previous page boundary.
	 */
	soff = ptob(btopr(soff));
	eoff = ptob(btop(eoff));
	if (soff >= eoff) {
		SWAP_PRINT(SW_CTL, "swapadd: soff %ld >= eoff %ld\n",
		    soff, eoff, 0, 0, 0);
		error = EINVAL;
		goto out;
	}

	pages = btop(eoff - soff);

	/* Allocate and partially set up the new swapinfo */
	nsip = kmem_zalloc(sizeof (struct swapinfo), KM_SLEEP);
	nsip->si_vp = cvp;

	nsip->si_soff = soff;
	nsip->si_eoff = eoff;
	nsip->si_hint = 0;
	nsip->si_checkcnt = nsip->si_alloccnt = 0;

	nsip->si_pnamelen = (int)strlen(swapname) + 1;
	nsip->si_pname = (char *)kmem_zalloc(nsip->si_pnamelen, KM_SLEEP);
	bcopy(swapname, nsip->si_pname, nsip->si_pnamelen - 1);
	SWAP_PRINT(SW_CTL, "swapadd: allocating swapinfo for %s, %ld pages\n",
	    swapname, pages, 0, 0, 0);
	/*
	 * Size of swapslots map in bytes
	 */
	nsip->si_mapsize = P2ROUNDUP(pages, NBBW) / NBBY;
	nsip->si_swapslots = kmem_zalloc(nsip->si_mapsize, KM_SLEEP);

	/*
	 * Permanently set the bits that can't ever be allocated,
	 * i.e. those from the ending offset to the round up slot for the
	 * swapslots bit map.
	 */
	start = pages;
	end = P2ROUNDUP(pages, NBBW);
	for (i = start; i < end; i++) {
		SWAP_PRINT(SW_CTL, "swapadd: set bit for page %ld\n", i,
		    0, 0, 0, 0);
		SETBIT(nsip->si_swapslots, i);
	}
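
	/*
	 * Worked example of the map sizing above (illustrative numbers):
	 * for pages = 1000 and NBBW = 32, P2ROUNDUP(1000, 32) = 1024 bits,
	 * so si_mapsize = 1024 / NBBY = 128 bytes.  Bits 1000 through 1023
	 * are the round-up residue and were permanently set by the loop
	 * above so they can never be handed out by swap_getoff().
	 */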
	nsip->si_npgs = nsip->si_nfpgs = pages;
	/*
	 * Now check to see if we can add it. We wait until now to check
	 * because we need the swapinfo_lock and we don't want to sleep
	 * with it held (e.g., during kmem_alloc()) while we're setting
	 * up the swapinfo.
	 */
	mutex_enter(&swapinfo_lock);
	for (sipp = &swapinfo; (esip = *sipp) != NULL; sipp = &esip->si_next) {
		if (esip->si_vp == cvp) {
			if (esip->si_soff == soff && esip->si_npgs == pages &&
			    (esip->si_flags & ST_DOINGDEL)) {
				/*
				 * We are adding a device that we are in the
				 * middle of deleting. Just clear the
				 * ST_DOINGDEL flag to signal this and
				 * the deletion routine will eventually notice
				 * it and add it back.
				 */
				esip->si_flags &= ~ST_DOINGDEL;
				mutex_exit(&swapinfo_lock);
				goto out;
			}
			/* disallow overlapping swap files */
			if ((soff < esip->si_eoff) && (eoff > esip->si_soff)) {
				error = EEXIST;
				mutex_exit(&swapinfo_lock);
				goto out;
			}
		}
	}

	nswapfiles++;

	/*
	 * add new swap device to list and shift allocations to it
	 * before updating the anoninfo counters
	 */
	*sipp = nsip;
	silast = nsip;

	/*
	 * Update the total amount of reservable swap space
	 * accounting properly for swap space from physical memory
	 */
	/* New swap device soaks up currently reserved memory swap */
	mutex_enter(&anoninfo_lock);

	ASSERT(k_anoninfo.ani_mem_resv >= k_anoninfo.ani_locked_swap);
	ASSERT(k_anoninfo.ani_max >= k_anoninfo.ani_phys_resv);

	k_anoninfo.ani_max += pages;
	ANI_ADD(pages);
	if (k_anoninfo.ani_mem_resv > k_anoninfo.ani_locked_swap) {
		returned_mem = MIN(k_anoninfo.ani_mem_resv -
		    k_anoninfo.ani_locked_swap,
		    k_anoninfo.ani_max - k_anoninfo.ani_phys_resv);

		ANI_ADD(-returned_mem);
		k_anoninfo.ani_free -= returned_mem;
		k_anoninfo.ani_mem_resv -= returned_mem;
		k_anoninfo.ani_phys_resv += returned_mem;

		mutex_enter(&freemem_lock);
		availrmem += returned_mem;
		mutex_exit(&freemem_lock);
	}
	/*
	 * At boot time, to permit booting small memory machines using
	 * only physical memory as swap space, we allowed a dangerously
	 * large amount of memory to be used as swap space; now that
	 * more physical backing store is available bump down the amount
	 * we can get from memory to a safer size.
	 */
	if (swapfs_minfree < swapfs_desfree) {
		mutex_enter(&freemem_lock);
		if (availrmem > swapfs_desfree || !k_anoninfo.ani_mem_resv)
			swapfs_minfree = swapfs_desfree;
		mutex_exit(&freemem_lock);
	}

	SWAP_PRINT(SW_CTL, "swapadd: ani_max %ld ani_free %ld\n",
	    k_anoninfo.ani_max, k_anoninfo.ani_free, 0, 0, 0);

	mutex_exit(&anoninfo_lock);

	mutex_exit(&swapinfo_lock);

	/* Initialize the dump device */
	mutex_enter(&dump_lock);
	if (dumpvp == NULL)
		(void) dumpinit(vp, swapname, 0);
	mutex_exit(&dump_lock);

	VN_HOLD(cvp);
out:
	if (error || esip) {
		SWAP_PRINT(SW_CTL, "swapadd: error (%d)\n", error, 0, 0, 0, 0);

		if (!wasswap) {
			mutex_enter(&cvp->v_lock);
			cvp->v_flag &= ~VISSWAP;
			mutex_exit(&cvp->v_lock);
		}
		if (nsip) {
			kmem_free(nsip->si_swapslots, (size_t)nsip->si_mapsize);
			kmem_free(nsip->si_pname, nsip->si_pnamelen);
			kmem_free(nsip, sizeof (*nsip));
		}
		mutex_enter(&swap_lock);
		(void) VOP_CLOSE(cvp, FREAD|FWRITE, 1, (offset_t)0, CRED(),
		    NULL);
		mutex_exit(&swap_lock);
	}
	return (error);
}

/*
 * Delete a swap file.
 */
static int
swapdel(
	struct vnode *vp,
	ulong_t lowblk)	/* Low block number of area to delete. */
{
	struct swapinfo **sipp, *osip = NULL;
	struct vnode *cvp;
	u_offset_t soff;
	int error = 0;
	u_offset_t toff = 0;
	struct vnode *tvp = NULL;
	spgcnt_t pages;
	struct anon **app, *ap;
	kmutex_t *ahm;
	pgcnt_t adjust_swap = 0;

	/* Find the swap file entry for the file to be deleted */
	cvp = common_specvp(vp);

	lowblk = lowblk ? lowblk : 1;	/* Skip first page (disk label) */
	soff = ptob(btopr(lowblk << SCTRSHFT)); /* must be page aligned */

	mutex_enter(&swapinfo_lock);
	for (sipp = &swapinfo; (osip = *sipp) != NULL; sipp = &osip->si_next) {
		if ((osip->si_vp == cvp) &&
		    (osip->si_soff == soff) && (osip->si_flags == 0))
			break;
	}

	/* If the file was not found, error. */
	if (osip == NULL) {
		error = EINVAL;
		mutex_exit(&swapinfo_lock);
		goto out;
	}

	pages = osip->si_npgs;

	/*
	 * Do not delete if we will be low on swap pages.
	 */
	mutex_enter(&anoninfo_lock);

	ASSERT(k_anoninfo.ani_mem_resv >= k_anoninfo.ani_locked_swap);
	ASSERT(k_anoninfo.ani_max >= k_anoninfo.ani_phys_resv);

	mutex_enter(&freemem_lock);
	if (((k_anoninfo.ani_max - k_anoninfo.ani_phys_resv) +
	    MAX((spgcnt_t)(availrmem - swapfs_minfree), 0)) < pages) {
		mutex_exit(&freemem_lock);
		mutex_exit(&anoninfo_lock);
		error = ENOMEM;
		cmn_err(CE_WARN, "swapdel - too few free pages");
		mutex_exit(&swapinfo_lock);
		goto out;
	}
	mutex_exit(&freemem_lock);

	k_anoninfo.ani_max -= pages;

	/* If needed, reserve memory swap to replace old device */
	if (k_anoninfo.ani_phys_resv > k_anoninfo.ani_max) {
		adjust_swap = k_anoninfo.ani_phys_resv - k_anoninfo.ani_max;
		k_anoninfo.ani_phys_resv -= adjust_swap;
		k_anoninfo.ani_mem_resv += adjust_swap;
		mutex_enter(&freemem_lock);
		availrmem -= adjust_swap;
		mutex_exit(&freemem_lock);
		ANI_ADD(adjust_swap);
	}
	ASSERT(k_anoninfo.ani_mem_resv >= k_anoninfo.ani_locked_swap);
	ASSERT(k_anoninfo.ani_max >= k_anoninfo.ani_phys_resv);
	mutex_exit(&anoninfo_lock);

	ANI_ADD(-pages);

	/*
	 * Set the delete flag.  This prevents anyone from allocating more
	 * pages from this file. Also set ST_DOINGDEL. Someone who wants to
	 * add the file back while we're deleting it will signify by clearing
	 * this flag.
	 */
	osip->si_flags |= ST_INDEL|ST_DOINGDEL;
	mutex_exit(&swapinfo_lock);

	/*
	 * Free all the allocated physical slots for this file. We do this
	 * by walking through the entire anon hash array, because we need
	 * to update all the anon slots that have physical swap slots on
	 * this file, and this is the only way to find them all. We go back
	 * to the beginning of a bucket after each slot is freed because the
	 * anonhash_lock is not held during the free and thus the hash table
	 * may change under us.
	 */
	for (app = anon_hash; app < &anon_hash[ANON_HASH_SIZE]; app++) {
		ahm = &anonhash_lock[(app - anon_hash) &
		    (AH_LOCK_SIZE - 1)].pad_mutex;
		mutex_enter(ahm);
top:
		for (ap = *app; ap != NULL; ap = ap->an_hash) {
			if (ap->an_pvp == cvp &&
			    ap->an_poff >= osip->si_soff &&
			    ap->an_poff < osip->si_eoff) {
				ASSERT(TESTBIT(osip->si_swapslots,
				    btop((size_t)(ap->an_poff -
				    osip->si_soff))));
				tvp = ap->an_vp;
				toff = ap->an_off;
				VN_HOLD(tvp);
				mutex_exit(ahm);

				error = swapslot_free(tvp, toff, osip);

				VN_RELE(tvp);
				mutex_enter(ahm);
				if (!error && (osip->si_flags & ST_DOINGDEL)) {
					goto top;
				} else {
					if (error) {
						cmn_err(CE_WARN,
						    "swapslot_free failed %d",
						    error);
					}

					/*
					 * Add device back before making it
					 * visible.
					 */
					mutex_enter(&swapinfo_lock);
					osip->si_flags &=
					    ~(ST_INDEL | ST_DOINGDEL);
					mutex_exit(&swapinfo_lock);

					/*
					 * Update the anon space available
					 */
					mutex_enter(&anoninfo_lock);

					k_anoninfo.ani_phys_resv += adjust_swap;
					k_anoninfo.ani_mem_resv -= adjust_swap;
					k_anoninfo.ani_max += pages;

					mutex_enter(&freemem_lock);
					availrmem += adjust_swap;
					mutex_exit(&freemem_lock);

					mutex_exit(&anoninfo_lock);

					ANI_ADD(pages);

					mutex_exit(ahm);
					goto out;
				}
			}
		}
		mutex_exit(ahm);
	}

	/* All done, they'd better all be free! */
	mutex_enter(&swapinfo_lock);
	ASSERT(osip->si_nfpgs == osip->si_npgs);

	/* Now remove it from the swapinfo list */
	for (sipp = &swapinfo; *sipp != NULL; sipp = &(*sipp)->si_next) {
		if (*sipp == osip)
			break;
	}
	ASSERT(*sipp);
	*sipp = osip->si_next;
	if (silast == osip)
		if ((silast = osip->si_next) == NULL)
			silast = swapinfo;
	nswapfiles--;
	mutex_exit(&swapinfo_lock);

	kmem_free(osip->si_swapslots, osip->si_mapsize);
	kmem_free(osip->si_pname, osip->si_pnamelen);
	kmem_free(osip, sizeof (*osip));

	mutex_enter(&dump_lock);
	if (cvp == dumpvp)
		dumpfini();
	mutex_exit(&dump_lock);

	/* Release the vnode */

	mutex_enter(&swap_lock);
	(void) VOP_CLOSE(cvp, FREAD|FWRITE, 1, (offset_t)0, CRED(), NULL);
	mutex_enter(&cvp->v_lock);
	cvp->v_flag &= ~VISSWAP;
	mutex_exit(&cvp->v_lock);
	VN_RELE(cvp);
	mutex_exit(&swap_lock);
out:
	return (error);
}

/*
 * Free up a physical swap slot on swapinfo sip, currently in use by the
 * anonymous page whose name is (vp, off).
 */
static int
swapslot_free(
	struct vnode *vp,
	u_offset_t off,
	struct swapinfo *sip)
{
	struct page *pp = NULL;
	struct anon *ap = NULL;
	int error = 0;
	kmutex_t *ahm;
	struct vnode *pvp = NULL;
	u_offset_t poff;
	int alloc_pg = 0;

	ASSERT(sip->si_vp != NULL);
	/*
	 * Get the page for the old swap slot if it exists or create a new one.
	 */
again:
	if ((pp = page_lookup(vp, off, SE_SHARED)) == NULL) {
		pp = page_create_va(vp, off, PAGESIZE, PG_WAIT | PG_EXCL,
		    segkmap, NULL);
		if (pp == NULL)
			goto again;
		alloc_pg = 1;

		error = swap_getphysname(vp, off, &pvp, &poff);
		if (error || pvp != sip->si_vp || poff < sip->si_soff ||
		    poff >= sip->si_eoff) {
			page_io_unlock(pp);
			/*LINTED: constant in conditional context*/
			VN_DISPOSE(pp, B_INVAL, 0, kcred);
			return (0);
		}

		error = VOP_PAGEIO(pvp, pp, poff, PAGESIZE, B_READ,
		    CRED(), NULL);
		if (error) {
			page_io_unlock(pp);
			if (error == EFAULT)
				error = 0;
			/*LINTED: constant in conditional context*/
			VN_DISPOSE(pp, B_INVAL, 0, kcred);
			return (error);
		}
	}

	/*
	 * The anon could have been removed by anon_decref* and/or reallocated
	 * by the anon layer (an_pvp == NULL) with the same vp, off.
	 * In this case the page which has been allocated needs to
	 * be freed.
	 */
	if (!alloc_pg)
		page_io_lock(pp);
	ahm = AH_MUTEX(vp, off);
	mutex_enter(ahm);
	ap = swap_anon(vp, off);
	if ((ap == NULL || ap->an_pvp == NULL) && alloc_pg) {
		mutex_exit(ahm);
		page_io_unlock(pp);
		/*LINTED: constant in conditional context*/
		VN_DISPOSE(pp, B_INVAL, 0, kcred);
		return (0);
	}

	/*
	 * Free the physical slot. It may have been freed up and replaced with
	 * another one while we were getting the page so we have to re-verify
	 * that this is really one we want. If we do free the slot we have
	 * to mark the page modified, as its backing store is now gone.
	 */
	if ((ap != NULL) && (ap->an_pvp == sip->si_vp && ap->an_poff >=
	    sip->si_soff && ap->an_poff < sip->si_eoff)) {
		swap_phys_free(ap->an_pvp, ap->an_poff, PAGESIZE);
		ap->an_pvp = NULL;
		ap->an_poff = 0;
		mutex_exit(ahm);
		hat_setmod(pp);
	} else {
		mutex_exit(ahm);
	}
	page_io_unlock(pp);
	page_unlock(pp);
	return (0);
}


/*
 * Get contiguous physical backing store for vp, in the range
 * [*offp, *offp + *lenp). May back a subrange of this, but must
 * always include the requested offset or fail. Returns the offsets
 * backed as [*offp, *offp + *lenp) and the physical offsets used to
 * back them from *pvpp in the range [*poffp, *poffp + *lenp).
 * Returns	0 for success
 *		SE_NOANON -- no anon slot for requested page
 *		SE_NOSWAP -- no physical swap space available
 *		SE_NODEV -- no swap devices on this system
 */
int
swap_newphysname(
	struct vnode *vp,
	u_offset_t offset,
	u_offset_t *offp,
	size_t *lenp,
	struct vnode **pvpp,
	u_offset_t *poffp)
{
	struct anon *ap = NULL;		/* anon slot for vp, off */
	int error = 0;
	struct vnode *pvp;
	u_offset_t poff, pstart, prem;
	size_t plen;
	u_offset_t off, start;
	kmutex_t *ahm;

	ASSERT(*offp <= offset && offset < *offp + *lenp);

	/* Get new physical swap slots. */
	plen = *lenp;
	error = swap_phys_alloc(&pvp, &pstart, &plen, 0);
	if (error != 1) {
		/*
		 * No swap available so return error unless requested
		 * offset is already backed in which case return that.
		 */
		ahm = AH_MUTEX(vp, offset);
		mutex_enter(ahm);
		if ((ap = swap_anon(vp, offset)) == NULL) {
			error = SE_NOANON;
			mutex_exit(ahm);
			return (error);
		}
		error = (ap->an_pvp ? 0 : (error == 0) ? SE_NOSWAP : SE_NODEV);
		*offp = offset;
		*lenp = PAGESIZE;
		*pvpp = ap->an_pvp;
		*poffp = ap->an_poff;
		mutex_exit(ahm);
		return (error);
	}

	/*
	 * We got plen (<= *lenp) contig slots. Use these to back a
	 * subrange of [*offp, *offp + *lenp) which includes offset.
	 * For now we just put offset at the end of the kluster.
	 * Clearly there are other possible choices - which is best?
	 */
	start = MAX(*offp,
	    (offset + PAGESIZE > plen) ? (offset + PAGESIZE - plen) : 0);
	ASSERT(start + plen <= *offp + *lenp);

	for (off = start, poff = pstart; poff < pstart + plen;
	    off += PAGESIZE, poff += PAGESIZE) {
		ahm = AH_MUTEX(vp, off);
		mutex_enter(ahm);
		if ((ap = swap_anon(vp, off)) != NULL) {
			/* Free old slot if any, and assign new one */
			if (ap->an_pvp)
				swap_phys_free(ap->an_pvp, ap->an_poff,
				    PAGESIZE);
			ap->an_pvp = pvp;
			ap->an_poff = poff;
		} else {	/* No anon slot for a klustered page, quit. */
			prem = (pstart + plen) - poff;
			/* Already did requested page, do partial kluster */
			if (off > offset) {
				plen = poff - pstart;
				error = 0;
			/* Fail on requested page, error */
			} else if (off == offset) {
				error = SE_NOANON;
			/* Fail on prior page, fail on requested page, error */
			} else if ((ap = swap_anon(vp, offset)) == NULL) {
				error = SE_NOANON;
			/* Fail on prior page, got requested page, do only it */
			} else {
				/* Free old slot if any, and assign new one */
				if (ap->an_pvp)
					swap_phys_free(ap->an_pvp, ap->an_poff,
					    PAGESIZE);
				ap->an_pvp = pvp;
				ap->an_poff = poff;
				/* One page kluster */
				start = offset;
				plen = PAGESIZE;
				pstart = poff;
				poff += PAGESIZE;
				prem -= PAGESIZE;
			}
			/* Free unassigned slots */
			swap_phys_free(pvp, poff, prem);
			mutex_exit(ahm);
			break;
		}
		mutex_exit(ahm);
	}
	ASSERT(*offp <= start && start + plen <= *offp + *lenp);
	ASSERT(start <= offset && offset < start + plen);
	*offp = start;
	*lenp = plen;
	*pvpp = pvp;
	*poffp = pstart;
	return (error);
}
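
/*
 * Worked example of the klustering above (illustrative numbers, 4 KB
 * pages): a caller asks to back [*offp = 0x0, *lenp = 0x8000) with the
 * faulting offset at 0x6000.  If swap_phys_alloc() returns only
 * plen = 0x4000 of contiguous slots, then start = MAX(0x0,
 * 0x6000 + 0x1000 - 0x4000) = 0x3000, so the kluster [0x3000, 0x7000)
 * is backed and the requested page lands at its end, as the comment
 * inside the function describes.
 */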


/*
 * Get the physical swap backing store location for a given anonymous page
 * named (vp, off). The backing store name is returned in (*pvpp, *poffp).
 * Returns	0 on success
 *		EIDRM -- no anon slot (page is not allocated)
 */
int
swap_getphysname(
	struct vnode *vp,
	u_offset_t off,
	struct vnode **pvpp,
	u_offset_t *poffp)
{
	struct anon *ap;
	int error = 0;
	kmutex_t *ahm;

	ahm = AH_MUTEX(vp, off);
	mutex_enter(ahm);

	/* Get anon slot for vp, off */
	ap = swap_anon(vp, off);
	if (ap == NULL) {
		error = EIDRM;
		goto out;
	}
	*pvpp = ap->an_pvp;
	*poffp = ap->an_poff;
out:
	mutex_exit(ahm);
	return (error);
}
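
/*
 * Hedged usage sketch (mirrors swapslot_free() above): translating an
 * anonymous page name into its physical backing store, then checking
 * whether the backing store lies on a particular swap device.
 *
 *	struct vnode *pvp;
 *	u_offset_t poff;
 *
 *	if (swap_getphysname(vp, off, &pvp, &poff) == 0 &&
 *	    pvp == sip->si_vp && poff >= sip->si_soff &&
 *	    poff < sip->si_eoff) {
 *		// the page named (vp, off) is backed by this device
 *	}
 */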