4 * The contents of this file are subject to the terms of the
5 * Common Development and Distribution License (the "License").
6 * You may not use this file except in compliance with the License.
7 *
8 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
9 * or http://www.opensolaris.org/os/licensing.
10 * See the License for the specific language governing permissions
11 * and limitations under the License.
12 *
13 * When distributing Covered Code, include this CDDL HEADER in each
14 * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
15 * If applicable, add the following below this CDDL HEADER, with the
16 * fields enclosed by brackets "[]" replaced with your own identifying
17 * information: Portions Copyright [yyyy] [name of copyright owner]
18 *
19 * CDDL HEADER END
20 */
21 /*
22 * Copyright (c) 1986, 2010, Oracle and/or its affiliates. All rights reserved.
23 * Copyright 2015 Nexenta Systems, Inc. All rights reserved.
24 */
25
26 /* Copyright (c) 1984, 1986, 1987, 1988, 1989 AT&T */
27 /* All Rights Reserved */
28
29 /*
30 * University Copyright- Copyright (c) 1982, 1986, 1988
31 * The Regents of the University of California
32 * All Rights Reserved
33 *
34 * University Acknowledgment- Portions of this document are derived from
35 * software developed by the University of California, Berkeley, and its
36 * contributors.
37 */
38
39 /*
40 * VM - paged vnode.
41 *
42 * This file supplies vm support for the vnode operations that deal with pages.
43 */
415 if (flags & B_ERROR) {
416 /*
417 * Write operation failed. We don't want
418 * to destroy (or free) the page unless B_FORCE
419 * is set. We set the mod bit again and release
420 * all locks on the page so that it will get written
421 * back again later when things are hopefully
422 * better again.
423 * If B_INVAL and B_FORCE is set we really have
424 * to destroy the page.
425 */
426 if ((flags & (B_INVAL|B_FORCE)) == (B_INVAL|B_FORCE)) {
427 page_io_unlock(pp);
428 /*LINTED: constant in conditional context*/
429 VN_DISPOSE(pp, B_INVAL, 0, kcred);
430 } else {
431 hat_setmod_only(pp);
432 page_io_unlock(pp);
433 page_unlock(pp);
434 }
435 } else if (flags & B_INVAL) {
436 /*
437 * XXX - Failed writes with B_INVAL set are
438 * not handled appropriately.
439 */
440 page_io_unlock(pp);
441 /*LINTED: constant in conditional context*/
442 VN_DISPOSE(pp, B_INVAL, 0, kcred);
443 } else if (flags & B_FREE ||!hat_page_is_mapped(pp)) {
444 /*
445 * Update statistics for pages being paged out
446 */
447 if (pp->p_vnode) {
448 if (IS_SWAPFSVP(pp->p_vnode)) {
449 anonpgout++;
450 } else {
451 if (pp->p_vnode->v_flag & VVMEXEC) {
452 execpgout++;
453 } else {
454 fspgout++;
455 }
456 }
556 CPU_STATS_ADDQ(cpup, vm, pgrec, pgrec);
557 CPU_STATS_ADDQ(cpup, vm, pgout, pgout);
558 CPU_STATS_ADDQ(cpup, vm, pgpgout, pgpgout);
559 CPU_STATS_ADDQ(cpup, vm, anonpgout, anonpgout);
560 CPU_STATS_ADDQ(cpup, vm, anonfree, anonfree);
561 CPU_STATS_ADDQ(cpup, vm, fspgout, fspgout);
562 CPU_STATS_ADDQ(cpup, vm, fsfree, fsfree);
563 CPU_STATS_ADDQ(cpup, vm, execpgout, execpgout);
564 CPU_STATS_ADDQ(cpup, vm, execfree, execfree);
565 CPU_STATS_EXIT_K();
566
567 /* Kernel probe */
568 TNF_PROBE_4(pageout, "vm pageio io", /* CSTYLED */,
569 tnf_opaque, vnode, vp,
570 tnf_ulong, pages_pageout, pgpgout,
571 tnf_ulong, pages_freed, dfree,
572 tnf_ulong, pages_reclaimed, pgrec);
573 }
574
575 /*
576 * Flags are composed of {B_ASYNC, B_INVAL, B_FREE, B_DONTNEED, B_DELWRI,
577 * B_TRUNC, B_FORCE}. B_DELWRI indicates that this page is part of a kluster
578 * operation and is only to be considered if it doesn't involve any
579 * waiting here. B_TRUNC indicates that the file is being truncated
580 * and so no i/o needs to be done. B_FORCE indicates that the page
581 * must be destroyed so don't try writing it out.
582 *
583 * The caller must ensure that the page is locked. Returns 1, if
584 * the page should be written back (the "iolock" is held in this
585 * case), or 0 if the page has been dealt with or has been
586 * unlocked.
587 */
588 int
589 pvn_getdirty(page_t *pp, int flags)
590 {
591 ASSERT((flags & (B_INVAL | B_FREE)) ?
592 PAGE_EXCL(pp) : PAGE_SHARED(pp));
593 ASSERT(PP_ISFREE(pp) == 0);
594
595 /*
596 * If trying to invalidate or free a logically `locked' page,
597 * forget it. Don't need page_struct_lock to check p_lckcnt and
611 * If B_DELWRI is set, which implies that this request is
612 * due to a klustering operation.
613 *
614 * If this is an async (B_ASYNC) operation and we are not doing
615 * invalidation (B_INVAL) [The current i/o or fsflush will ensure
616 * that the page is written out].
617 */
618 if ((flags & B_DELWRI) || ((flags & (B_INVAL | B_ASYNC)) == B_ASYNC)) {
619 if (!page_io_trylock(pp)) {
620 page_unlock(pp);
621 return (0);
622 }
623 } else {
624 page_io_lock(pp);
625 }
626
627 /*
628 * If we want to free or invalidate the page then
629 * we need to unload it so that anyone who wants
630 * it will have to take a minor fault to get it.
631 * Otherwise, we're just writing the page back so we
632 * need to sync up the hardware and software mod bit to
633 * detect any future modifications. We clear the
634 * software mod bit when we put the page on the dirty
635 * list.
636 */
637 if (flags & (B_INVAL | B_FREE)) {
638 (void) hat_pageunload(pp, HAT_FORCE_PGUNLOAD);
639 } else {
640 (void) hat_pagesync(pp, HAT_SYNC_ZERORM);
641 }
642
643 if (!hat_ismod(pp) || (flags & B_TRUNC)) {
644 /*
645 * Don't need to add it to the
646 * list after all.
647 */
648 page_io_unlock(pp);
649 if (flags & B_INVAL) {
650 /*LINTED: constant in conditional context*/
651 VN_DISPOSE(pp, B_INVAL, 0, kcred);
652 } else if (flags & B_FREE) {
653 /*LINTED: constant in conditional context*/
654 VN_DISPOSE(pp, B_FREE, (flags & B_DONTNEED), kcred);
655 } else {
656 /*
657 * This is the advisory path for the callers
658 * of VOP_PUTPAGE() who prefer freeing the
659 * page _only_ if no one else is accessing it.
660 * E.g. segmap_release()
661 *
662 * The above hat_ismod() check is useless because:
663 * (1) we may not be holding SE_EXCL lock;
664 * (2) we've not unloaded _all_ translations
665 *
666 * Let page_release() do the heavy-lifting.
667 */
668 (void) page_release(pp, 1);
669 }
670 return (0);
671 }
672
673 /*
674 * Page is dirty, get it ready for the write back
675 * and add page to the dirty list.
676 */
677 hat_clrrefmod(pp);
678
679 /*
680 * If we're going to free the page when we're done
681 * then we can let others try to use it starting now.
682 * We'll detect the fact that they used it when the
683 * i/o is done and avoid freeing the page.
684 */
685 if (flags & B_FREE)
686 page_downgrade(pp);
687
688
689 TRACE_1(TR_FAC_VM, TR_PVN_GETDIRTY, "pvn_getdirty:pp %p", pp);
690
691 return (1);
692 }
693
694
/*
 * Constructor (kmem-style signature) for vplist marker pages:
 * zero the page_t and tag its p_hash so list walkers can tell
 * a marker apart from a real page.
 */
695 /*ARGSUSED*/
696 static int
697 marker_constructor(void *buf, void *cdrarg, int kmflags)
698 {
699 page_t *mp = buf;
700 bzero(mp, sizeof (*mp));
701 mp->p_hash = PVN_VPLIST_HASH_TAG;
702 return (0);
703 }
704
705 void
|
4 * The contents of this file are subject to the terms of the
5 * Common Development and Distribution License (the "License").
6 * You may not use this file except in compliance with the License.
7 *
8 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
9 * or http://www.opensolaris.org/os/licensing.
10 * See the License for the specific language governing permissions
11 * and limitations under the License.
12 *
13 * When distributing Covered Code, include this CDDL HEADER in each
14 * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
15 * If applicable, add the following below this CDDL HEADER, with the
16 * fields enclosed by brackets "[]" replaced with your own identifying
17 * information: Portions Copyright [yyyy] [name of copyright owner]
18 *
19 * CDDL HEADER END
20 */
21 /*
22 * Copyright (c) 1986, 2010, Oracle and/or its affiliates. All rights reserved.
23 * Copyright 2015 Nexenta Systems, Inc. All rights reserved.
24 * Copyright (c) 2012, Joyent, Inc. All rights reserved.
25 */
26
27 /* Copyright (c) 1984, 1986, 1987, 1988, 1989 AT&T */
28 /* All Rights Reserved */
29
30 /*
31 * University Copyright- Copyright (c) 1982, 1986, 1988
32 * The Regents of the University of California
33 * All Rights Reserved
34 *
35 * University Acknowledgment- Portions of this document are derived from
36 * software developed by the University of California, Berkeley, and its
37 * contributors.
38 */
39
40 /*
41 * VM - paged vnode.
42 *
43 * This file supplies vm support for the vnode operations that deal with pages.
44 */
416 if (flags & B_ERROR) {
417 /*
418 * Write operation failed. We don't want
419 * to destroy (or free) the page unless B_FORCE
420 * is set. We set the mod bit again and release
421 * all locks on the page so that it will get written
422 * back again later when things are hopefully
423 * better again.
424 * If B_INVAL and B_FORCE is set we really have
425 * to destroy the page.
426 */
427 if ((flags & (B_INVAL|B_FORCE)) == (B_INVAL|B_FORCE)) {
428 page_io_unlock(pp);
429 /*LINTED: constant in conditional context*/
430 VN_DISPOSE(pp, B_INVAL, 0, kcred);
431 } else {
432 hat_setmod_only(pp);
433 page_io_unlock(pp);
434 page_unlock(pp);
435 }
436 } else if ((flags & (B_INVAL | B_INVALCURONLY)) == B_INVAL) {
437 /*
438 * If B_INVALCURONLY is set, then we handle that case
439 * in the next conditional if hat_page_is_mapped()
440 * indicates that there are no additional mappings
441 * to the page.
442 */
443
444 /*
445 * XXX - Failed writes with B_INVAL set are
446 * not handled appropriately.
447 */
448 page_io_unlock(pp);
449 /*LINTED: constant in conditional context*/
450 VN_DISPOSE(pp, B_INVAL, 0, kcred);
451 } else if (flags & B_FREE ||!hat_page_is_mapped(pp)) {
452 /*
453 * Update statistics for pages being paged out
454 */
455 if (pp->p_vnode) {
456 if (IS_SWAPFSVP(pp->p_vnode)) {
457 anonpgout++;
458 } else {
459 if (pp->p_vnode->v_flag & VVMEXEC) {
460 execpgout++;
461 } else {
462 fspgout++;
463 }
464 }
564 CPU_STATS_ADDQ(cpup, vm, pgrec, pgrec);
565 CPU_STATS_ADDQ(cpup, vm, pgout, pgout);
566 CPU_STATS_ADDQ(cpup, vm, pgpgout, pgpgout);
567 CPU_STATS_ADDQ(cpup, vm, anonpgout, anonpgout);
568 CPU_STATS_ADDQ(cpup, vm, anonfree, anonfree);
569 CPU_STATS_ADDQ(cpup, vm, fspgout, fspgout);
570 CPU_STATS_ADDQ(cpup, vm, fsfree, fsfree);
571 CPU_STATS_ADDQ(cpup, vm, execpgout, execpgout);
572 CPU_STATS_ADDQ(cpup, vm, execfree, execfree);
573 CPU_STATS_EXIT_K();
574
575 /* Kernel probe */
576 TNF_PROBE_4(pageout, "vm pageio io", /* CSTYLED */,
577 tnf_opaque, vnode, vp,
578 tnf_ulong, pages_pageout, pgpgout,
579 tnf_ulong, pages_freed, dfree,
580 tnf_ulong, pages_reclaimed, pgrec);
581 }
582
583 /*
584 * Flags are composed of {B_ASYNC, B_INVAL, B_INVALCURONLY, B_FREE,
585 * B_DONTNEED, B_DELWRI, B_TRUNC, B_FORCE}.
586 * B_DELWRI indicates that this page is part of a kluster
587 * operation and is only to be considered if it doesn't involve any
588 * waiting here. B_TRUNC indicates that the file is being truncated
589 * and so no i/o needs to be done. B_FORCE indicates that the page
590 * must be destroyed so don't try writing it out.
591 *
592 * The caller must ensure that the page is locked. Returns 1, if
593 * the page should be written back (the "iolock" is held in this
594 * case), or 0 if the page has been dealt with or has been
595 * unlocked.
596 */
597 int
598 pvn_getdirty(page_t *pp, int flags)
599 {
600 ASSERT((flags & (B_INVAL | B_FREE)) ?
601 PAGE_EXCL(pp) : PAGE_SHARED(pp));
602 ASSERT(PP_ISFREE(pp) == 0);
603
604 /*
605 * If trying to invalidate or free a logically `locked' page,
606 * forget it. Don't need page_struct_lock to check p_lckcnt and
620 * If B_DELWRI is set, which implies that this request is
621 * due to a klustering operation.
622 *
623 * If this is an async (B_ASYNC) operation and we are not doing
624 * invalidation (B_INVAL) [The current i/o or fsflush will ensure
625 * that the page is written out].
626 */
627 if ((flags & B_DELWRI) || ((flags & (B_INVAL | B_ASYNC)) == B_ASYNC)) {
628 if (!page_io_trylock(pp)) {
629 page_unlock(pp);
630 return (0);
631 }
632 } else {
633 page_io_lock(pp);
634 }
635
636 /*
637 * If we want to free or invalidate the page then
638 * we need to unload it so that anyone who wants
639 * it will have to take a minor fault to get it.
640 * If we are only invalidating the page for the
641 * current process, then pass in a different flag.
642 * Otherwise, we're just writing the page back so we
643 * need to sync up the hardware and software mod bit to
644 * detect any future modifications. We clear the
645 * software mod bit when we put the page on the dirty
646 * list.
647 */
648 if (flags & B_INVALCURONLY) {
649 (void) hat_pageunload(pp, HAT_CURPROC_PGUNLOAD);
650 } else if (flags & (B_INVAL | B_FREE)) {
651 (void) hat_pageunload(pp, HAT_FORCE_PGUNLOAD);
652 } else {
653 (void) hat_pagesync(pp, HAT_SYNC_ZERORM);
654 }
655
656 if (!hat_ismod(pp) || (flags & B_TRUNC)) {
657 /*
658 * Don't need to add it to the
659 * list after all.
660 */
661 page_io_unlock(pp);
662 if ((flags & (B_INVAL | B_INVALCURONLY)) == B_INVAL) {
663 /*LINTED: constant in conditional context*/
664 VN_DISPOSE(pp, B_INVAL, 0, kcred);
665 } else if (flags & B_FREE) {
666 /*LINTED: constant in conditional context*/
667 VN_DISPOSE(pp, B_FREE, (flags & B_DONTNEED), kcred);
668 } else {
669 /*
670 * This is the advisory path for the callers
671 * of VOP_PUTPAGE() who prefer freeing the
672 * page _only_ if no one else is accessing it.
673 * E.g. segmap_release()
674 * We also take this path for B_INVALCURONLY and
675 * let page_release call VN_DISPOSE if no one else is
676 * using the page.
677 *
678 * The above hat_ismod() check is useless because:
679 * (1) we may not be holding SE_EXCL lock;
680 * (2) we've not unloaded _all_ translations
681 *
682 * Let page_release() do the heavy-lifting.
683 */
684 (void) page_release(pp, 1);
685 }
686 return (0);
687 }
688
689 /*
690 * Page is dirty, get it ready for the write back
691 * and add page to the dirty list.
692 */
693 hat_clrrefmod(pp);
694
695 /*
696 * If we're going to free the page when we're done
697 * then we can let others try to use it starting now.
698 * We'll detect the fact that they used it when the
699 * i/o is done and avoid freeing the page.
700 */
701 if (flags & (B_FREE | B_INVALCURONLY))
702 page_downgrade(pp);
703
704
705 TRACE_1(TR_FAC_VM, TR_PVN_GETDIRTY, "pvn_getdirty:pp %p", pp);
706
707 return (1);
708 }
709
710
/*
 * kmem-constructor-shaped initializer for vplist marker pages.
 * Clears the page_t and stamps p_hash with the marker tag so that
 * vplist walkers can recognize it as a marker rather than a page.
 */
711 /*ARGSUSED*/
712 static int
713 marker_constructor(void *buf, void *cdrarg, int kmflags)
714 {
715 page_t *mp = buf;
716 bzero(mp, sizeof (*mp));
717 mp->p_hash = PVN_VPLIST_HASH_TAG;
718 return (0);
719 }
720
721 void
|