Print this page
XXXX give me a better summary


2009                                     flags, cr);
2010                                 if (err != 0)
2011                                         break;
2012                         }
2013                 }
2014         }
2015         /* If invalidating, verify all pages on vnode list are gone. */
2016         if (err == 0 && off == 0 && len == 0 &&
2017             (flags & B_INVAL) && vn_has_cached_data(vp)) {
2018                 panic("tmp_putpage: B_INVAL, pages not gone");
2019                 /*NOTREACHED*/
2020         }
2021 out:
2022         if ((curproc == proc_pageout) || dolock)
2023                 rw_exit(&tp->tn_contents);
2024         /*
2025          * Only reason putapage is going to give us SE_NOSWAP as error
2026          * is when we ask a page to be written to physical backing store
2027          * and there is none. Ignore this because we might be dealing
2028          * with a swap page which does not have any backing store
2029          * on disk. In any other case we won't get this error over here.
2030          */
2031         if (err == SE_NOSWAP)
2032                 err = 0;
2033         return (err);
2034 }
2035 
2036 long tmp_putpagecnt, tmp_pagespushed;	/* stats: successful tmp_putapage() calls / total pages written */
2037 
2038 /*
2039  * Write out a single page.
2040  * For tmpfs this means choose a physical swap slot and write the page
2041  * out using VOP_PAGEIO. For performance, we attempt to kluster; i.e.,
2042  * we try to find a bunch of other dirty pages adjacent in the file
2043  * and a bunch of contiguous swap slots, and then write all the pages
2044  * out in a single i/o.
2045  */
2046 /*ARGSUSED*/
2047 static int
2048 tmp_putapage(
2049         struct vnode *vp,
2050         page_t *pp,
2051         u_offset_t *offp,


2074         tmp_klustsize = klustsize;
2075         offset = pp->p_offset;
2076         klstart = (offset / tmp_klustsize) * tmp_klustsize;
2077         kllen = MIN(tmp_klustsize, tp->tn_size - klstart);
2078 
2079         /* Get a kluster of pages */
2080         pplist =
2081             pvn_write_kluster(vp, pp, &tmpoff, &pp_len, klstart, kllen, flags);
2082 
2083         pp_off = (size_t)tmpoff;
2084 
2085         /*
2086          * Get a cluster of physical offsets for the pages; the amount we
2087          * get may be some subrange of what we ask for (io_off, io_len).
2088          */
2089         io_off = pp_off;
2090         io_len = pp_len;
2091         err = swap_newphysname(vp, offset, &io_off, &io_len, &pvp, &pstart);
2092         ASSERT(err != SE_NOANON); /* anon slot must have been filled */
2093         if (err) {
2094                 pvn_write_done(pplist, B_ERROR | B_WRITE | flags);


2095                 /*
2096                  * If this routine is called as a result of segvn_sync
2097                  * operation and we have no physical swap then we can get an
2098                  * error here. In such case we would return SE_NOSWAP as error.
2099                  * At this point, we expect only SE_NOSWAP.
2100                  */
2101                 ASSERT(err == SE_NOSWAP);
2102                 if (flags & B_INVAL)
2103                         err = ENOMEM;
2104                 goto out;
2105         }
2106         ASSERT(pp_off <= io_off && io_off + io_len <= pp_off + pp_len);
2107         ASSERT(io_off <= offset && offset < io_off + io_len);
2108 
2109         /* Toss pages at front/rear that we couldn't get physical backing for */
2110         if (io_off != pp_off) {
2111                 npplist = NULL;
2112                 page_list_break(&pplist, &npplist, btop(io_off - pp_off));
2113                 ASSERT(pplist->p_offset == pp_off);
2114                 ASSERT(pplist->p_prev->p_offset == io_off - PAGESIZE);
2115                 pvn_write_done(pplist, B_ERROR | B_WRITE | flags);
2116                 pplist = npplist;
2117         }
2118         if (io_off + io_len < pp_off + pp_len) {
2119                 npplist = NULL;
2120                 page_list_break(&pplist, &npplist, btop(io_len));
2121                 ASSERT(npplist->p_offset == io_off + io_len);
2122                 ASSERT(npplist->p_prev->p_offset == pp_off + pp_len - PAGESIZE);


2126         ASSERT(pplist->p_offset == io_off);
2127         ASSERT(pplist->p_prev->p_offset == io_off + io_len - PAGESIZE);
2128         ASSERT(btopr(io_len) <= btopr(kllen));
2129 
2130         /* Do i/o on the remaining kluster */
2131         err = VOP_PAGEIO(pvp, pplist, (u_offset_t)pstart, io_len,
2132             B_WRITE | flags, cr, NULL);
2133 
2134         if ((flags & B_ASYNC) == 0) {
2135                 pvn_write_done(pplist, ((err) ? B_ERROR : 0) | B_WRITE | flags);
2136         }
2137 out:
2138         if (!err) {
2139                 if (offp)
2140                         *offp = io_off;
2141                 if (lenp)
2142                         *lenp = io_len;
2143                 tmp_putpagecnt++;
2144                 tmp_pagespushed += btop(io_len);
2145         }
2146         if (err && err != ENOMEM && err != SE_NOSWAP)
2147                 cmn_err(CE_WARN, "tmp_putapage: err %d\n", err);
2148         return (err);
2149 }
2150 
2151 /* ARGSUSED */
2152 static int
2153 tmp_map(
2154         struct vnode *vp,
2155         offset_t off,
2156         struct as *as,
2157         caddr_t *addrp,
2158         size_t len,
2159         uchar_t prot,
2160         uchar_t maxprot,
2161         uint_t flags,
2162         struct cred *cred,
2163         caller_context_t *ct)
2164 {
2165         struct segvn_crargs vn_a;
2166         struct tmpnode *tp = (struct tmpnode *)VTOTN(vp);




2009                                     flags, cr);
2010                                 if (err != 0)
2011                                         break;
2012                         }
2013                 }
2014         }
2015         /* If invalidating, verify all pages on vnode list are gone. */
2016         if (err == 0 && off == 0 && len == 0 &&
2017             (flags & B_INVAL) && vn_has_cached_data(vp)) {
2018                 panic("tmp_putpage: B_INVAL, pages not gone");
2019                 /*NOTREACHED*/
2020         }
2021 out:
2022         if ((curproc == proc_pageout) || dolock)
2023                 rw_exit(&tp->tn_contents);
2024         /*
2025          * Only reason putapage is going to give us SE_NOSWAP as error
2026          * is when we ask a page to be written to physical backing store
2027          * and there is none. Ignore this because we might be dealing
2028          * with a swap page which does not have any backing store
2029          * on disk, though the newer SE_NODEV error should cover that.
2030          * In any other case we won't get this error over here.
2031          */
2032         if (err == SE_NOSWAP || err == SE_NODEV)
2033                 err = 0;
2034         return (err);
2035 }
2036 
2037 long tmp_putpagecnt, tmp_pagespushed;	/* stats: successful tmp_putapage() calls / total pages written */
2038 
2039 /*
2040  * Write out a single page.
2041  * For tmpfs this means choose a physical swap slot and write the page
2042  * out using VOP_PAGEIO. For performance, we attempt to kluster; i.e.,
2043  * we try to find a bunch of other dirty pages adjacent in the file
2044  * and a bunch of contiguous swap slots, and then write all the pages
2045  * out in a single i/o.
2046  */
2047 /*ARGSUSED*/
2048 static int
2049 tmp_putapage(
2050         struct vnode *vp,
2051         page_t *pp,
2052         u_offset_t *offp,


2075         tmp_klustsize = klustsize;
2076         offset = pp->p_offset;
2077         klstart = (offset / tmp_klustsize) * tmp_klustsize;
2078         kllen = MIN(tmp_klustsize, tp->tn_size - klstart);
2079 
2080         /* Get a kluster of pages */
2081         pplist =
2082             pvn_write_kluster(vp, pp, &tmpoff, &pp_len, klstart, kllen, flags);
2083 
2084         pp_off = (size_t)tmpoff;
2085 
2086         /*
2087          * Get a cluster of physical offsets for the pages; the amount we
2088          * get may be some subrange of what we ask for (io_off, io_len).
2089          */
2090         io_off = pp_off;
2091         io_len = pp_len;
2092         err = swap_newphysname(vp, offset, &io_off, &io_len, &pvp, &pstart);
2093         ASSERT(err != SE_NOANON); /* anon slot must have been filled */
2094         if (err) {
2095                 ASSERT(err == SE_NOSWAP || err == SE_NODEV);
2096                 pvn_write_done(pplist, (err == SE_NODEV ? 0 : B_ERROR) |
2097                     B_WRITE | flags);
2098                 /*
2099                  * If this routine is called as a result of segvn_sync
2100                  * operation and we have no physical swap then we can get an
2101                  * error here. In such case we would return SE_NOSWAP as error.
2102                  */
2103                 if ((flags & B_INVAL) && err == SE_NOSWAP)

2104                         err = ENOMEM;
2105                 goto out;
2106         }
2107         ASSERT(pp_off <= io_off && io_off + io_len <= pp_off + pp_len);
2108         ASSERT(io_off <= offset && offset < io_off + io_len);
2109 
2110         /* Toss pages at front/rear that we couldn't get physical backing for */
2111         if (io_off != pp_off) {
2112                 npplist = NULL;
2113                 page_list_break(&pplist, &npplist, btop(io_off - pp_off));
2114                 ASSERT(pplist->p_offset == pp_off);
2115                 ASSERT(pplist->p_prev->p_offset == io_off - PAGESIZE);
2116                 pvn_write_done(pplist, B_ERROR | B_WRITE | flags);
2117                 pplist = npplist;
2118         }
2119         if (io_off + io_len < pp_off + pp_len) {
2120                 npplist = NULL;
2121                 page_list_break(&pplist, &npplist, btop(io_len));
2122                 ASSERT(npplist->p_offset == io_off + io_len);
2123                 ASSERT(npplist->p_prev->p_offset == pp_off + pp_len - PAGESIZE);


2127         ASSERT(pplist->p_offset == io_off);
2128         ASSERT(pplist->p_prev->p_offset == io_off + io_len - PAGESIZE);
2129         ASSERT(btopr(io_len) <= btopr(kllen));
2130 
2131         /* Do i/o on the remaining kluster */
2132         err = VOP_PAGEIO(pvp, pplist, (u_offset_t)pstart, io_len,
2133             B_WRITE | flags, cr, NULL);
2134 
2135         if ((flags & B_ASYNC) == 0) {
2136                 pvn_write_done(pplist, ((err) ? B_ERROR : 0) | B_WRITE | flags);
2137         }
2138 out:
2139         if (!err) {
2140                 if (offp)
2141                         *offp = io_off;
2142                 if (lenp)
2143                         *lenp = io_len;
2144                 tmp_putpagecnt++;
2145                 tmp_pagespushed += btop(io_len);
2146         }
2147         if (err && err != ENOMEM && err != SE_NOSWAP && err != SE_NODEV)
2148                 cmn_err(CE_WARN, "tmp_putapage: err %d\n", err);
2149         return (err);
2150 }
2151 
2152 /* ARGSUSED */
2153 static int
2154 tmp_map(
2155         struct vnode *vp,
2156         offset_t off,
2157         struct as *as,
2158         caddr_t *addrp,
2159         size_t len,
2160         uchar_t prot,
2161         uchar_t maxprot,
2162         uint_t flags,
2163         struct cred *cred,
2164         caller_context_t *ct)
2165 {
2166         struct segvn_crargs vn_a;
2167         struct tmpnode *tp = (struct tmpnode *)VTOTN(vp);