@@ -684,21 +684,10 @@
return (0);
if (limit == RLIM64_INFINITY || limit > MAXOFFSET_T)
limit = MAXOFFSET_T;
- /*
- * Pre-fault the pages to ensure slow (eg NFS) pages
- * don't hold up txg.
- * Skip this if uio contains loaned arc_buf.
- */
- if ((uio->uio_extflg == UIO_XUIO) &&
- (((xuio_t *)uio)->xu_type == UIOTYPE_ZEROCOPY))
- xuio = (xuio_t *)uio;
- else
- uio_prefaultpages(n, uio);
-
ZFS_ENTER(zfsvfs);
ZFS_VERIFY_ZP(zp);
SA_ADD_BULK_ATTR(bulk, count, SA_ZPL_MTIME(zfsvfs), NULL, &mtime, 16);
SA_ADD_BULK_ATTR(bulk, count, SA_ZPL_CTIME(zfsvfs), NULL, &ctime, 16);
@@ -747,10 +736,21 @@
ZFS_EXIT(zfsvfs);
return (error);
}
/*
+ * Pre-fault the pages to ensure slow (eg NFS) pages
+ * don't hold up txg.
+ * Skip this if uio contains loaned arc_buf.
+ */
+ if ((uio->uio_extflg == UIO_XUIO) &&
+ (((xuio_t *)uio)->xu_type == UIOTYPE_ZEROCOPY))
+ xuio = (xuio_t *)uio;
+ else
+ uio_prefaultpages(MIN(n, max_blksz), uio);
+
+ /*
* If in append mode, set the io offset pointer to eof.
*/
if (ioflag & FAPPEND) {
/*
* Obtain an appending range lock to guarantee file append
@@ -990,10 +990,13 @@
if (prev_error != 0 || error != 0)
break;
ASSERT(tx_bytes == nbytes);
n -= nbytes;
+
+ if (!xuio && n > 0)
+ uio_prefaultpages(MIN(n, max_blksz), uio);
}
zfs_range_unlock(rl);
/*
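
Taken together, the hunks above move the uio_prefaultpages() call so it runs after ZFS_ENTER() and the initial checks rather than before them, and fault in at most one block's worth of user pages at a time: MIN(n, max_blksz) up front, then the next chunk at the bottom of the copy loop whenever data remains and the request is not a zero-copy xuio. The following is a simplified userland sketch of that chunked pre-fault pattern, not the illumos code; prefault_chunk() and chunked_copy() are hypothetical names invented for the illustration.

    #include <stddef.h>
    #include <string.h>
    #include <unistd.h>

    #define MIN(a, b) ((a) < (b) ? (a) : (b))

    /*
     * Touch one byte per page of buf[0..len) so the pages are resident.
     * Simplified stand-in for uio_prefaultpages(); illustration only.
     */
    static void
    prefault_chunk(const char *buf, size_t len)
    {
        size_t pgsz = (size_t)sysconf(_SC_PAGESIZE);
        volatile char c;

        for (size_t off = 0; off < len; off += pgsz)
            c = buf[off];
        if (len != 0)
            c = buf[len - 1];
        (void) c;
    }

    /*
     * Copy src into dst one "block" at a time.  Before each block is
     * consumed, the pages backing the next block are faulted in, so a
     * slow fault (e.g. a page that must come from NFS) is never taken
     * while a transaction would be held open.
     */
    static void
    chunked_copy(char *dst, const char *src, size_t n, size_t max_blksz)
    {
        prefault_chunk(src, MIN(n, max_blksz));        /* initial chunk */

        while (n > 0) {
            size_t nbytes = MIN(n, max_blksz);

            memcpy(dst, src, nbytes);                  /* stands in for the DMU copy */
            dst += nbytes;
            src += nbytes;
            n -= nbytes;

            if (n > 0)                                 /* fault in the next chunk */
                prefault_chunk(src, MIN(n, max_blksz));
        }
    }

The re-fault at the end of each iteration mirrors the new uio_prefaultpages(MIN(n, max_blksz), uio) call in the loop above: only the pages needed for the next chunk are faulted, rather than the entire request at once.
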
@@ -4256,12 +4259,10 @@
&ctime, 16);
SA_ADD_BULK_ATTR(bulk, count, SA_ZPL_FLAGS(zfsvfs), NULL,
&zp->z_pflags, 8);
zfs_tstamp_update_setup(zp, CONTENT_MODIFIED, mtime, ctime,
B_TRUE);
- err = sa_bulk_update(zp->z_sa_hdl, bulk, count, tx);
-
zfs_log_write(zfsvfs->z_log, tx, TX_WRITE, zp, off, len, 0);
}
dmu_tx_commit(tx);
out:
@@ -4793,10 +4794,14 @@
uint64_t pages = btopr(len);
ASSERT3U(VTOZ(vp)->z_mapcnt, >=, pages);
atomic_add_64(&VTOZ(vp)->z_mapcnt, -pages);
+ if ((flags & MAP_SHARED) && (prot & PROT_WRITE) &&
+ vn_has_cached_data(vp))
+ (void) VOP_PUTPAGE(vp, off, len, B_ASYNC, cr, ct);
+
return (0);
}
/*
* Free or allocate space in a file. Currently, this function only
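The final hunk, in the delmap path, kicks off an asynchronous VOP_PUTPAGE() when a writable MAP_SHARED mapping that still has cached pages is being torn down, so data modified through the mapping starts writeback immediately instead of sitting dirty in the page cache. A rough userland analogue of the same idea, flushing with msync(MS_ASYNC) before munmap(), is sketched below; it illustrates the intent only and is not the kernel code (the file path is an arbitrary example).

    #include <fcntl.h>
    #include <stdio.h>
    #include <string.h>
    #include <sys/mman.h>
    #include <unistd.h>

    /*
     * Userland analogue of the delmap change: before a writable shared
     * mapping goes away, schedule asynchronous writeback of its dirty
     * pages rather than leaving them to be flushed at some later time.
     */
    int
    main(void)
    {
        int fd = open("/tmp/putpage_demo.dat", O_RDWR | O_CREAT, 0644);

        if (fd == -1 || ftruncate(fd, 4096) == -1) {
            perror("setup");
            return (1);
        }

        char *p = mmap(NULL, 4096, PROT_READ | PROT_WRITE, MAP_SHARED, fd, 0);

        if (p == MAP_FAILED) {
            perror("mmap");
            return (1);
        }

        memcpy(p, "dirty data", 10);            /* dirty a shared page */

        (void) msync(p, 4096, MS_ASYNC);        /* cf. VOP_PUTPAGE(..., B_ASYNC, ...) */
        (void) munmap(p, 4096);                 /* drop the mapping */
        (void) close(fd);
        return (0);
    }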