NEX-3046 hsfs asserts instead of handling getpage beyond EOF
Reviewed by: Kevin Crowe <kevin.crowe@nexenta.com>
Reviewed by: Sanjay Nadkarni <sanjay.nadkarni@nexenta.com>
Reviewed by: Yuri Pankov <yuri.pankov@nexenta.com>
Reviewed by: Marcel Telka <marcel.telka@nexenta.com>
Revert "NEX-5659 hsfs asserts instead of handling getpage beyond EOF"
This reverts commit c4b53e5f3fdfec5d117f9719bc4b77581f318442.
NEX-5659 hsfs asserts instead of handling getpage beyond EOF
Reviewed by: Kevin Crowe <kevin.crowe@nexenta.com>
Reviewed by: Sanjay Nadkarni <sanjay.nadkarni@nexenta.com>
Reviewed by: Yuri Pankov <yuri.pankov@nexenta.com>
Reviewed by: Marcel Telka <marcel.telka@nexenta.com>
NEX-2974 cannot exec from hsfs since illumos 5405
--- old/usr/src/uts/common/fs/hsfs/hsfs_vnops.c
+++ new/usr/src/uts/common/fs/hsfs/hsfs_vnops.c
1 1 /*
2 2 * CDDL HEADER START
3 3 *
4 4 * The contents of this file are subject to the terms of the
5 5 * Common Development and Distribution License (the "License").
6 6 * You may not use this file except in compliance with the License.
7 7 *
8 8 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
9 9 * or http://www.opensolaris.org/os/licensing.
10 10 * See the License for the specific language governing permissions
11 11 * and limitations under the License.
12 12 *
13 13 * When distributing Covered Code, include this CDDL HEADER in each
14 14 * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
15 15 * If applicable, add the following below this CDDL HEADER, with the
16 16 * fields enclosed by brackets "[]" replaced with your own identifying
17 17 * information: Portions Copyright [yyyy] [name of copyright owner]
18 18 *
19 19 * CDDL HEADER END
20 20 */
21 21
22 22 /*
23 23 * Copyright 2009 Sun Microsystems, Inc. All rights reserved.
24 24 * Use is subject to license terms.
25 - * Copyright 2015 Nexenta Systems, Inc. All rights reserved.
25 + * Copyright 2016 Nexenta Systems, Inc. All rights reserved.
26 26 * Copyright (c) 2017 by Delphix. All rights reserved.
27 27 */
28 28
29 29 /*
30 30 * Vnode operations for the High Sierra filesystem
31 31 */
32 32
33 33 #include <sys/types.h>
34 34 #include <sys/t_lock.h>
35 35 #include <sys/param.h>
36 36 #include <sys/time.h>
37 37 #include <sys/systm.h>
38 38 #include <sys/sysmacros.h>
39 39 #include <sys/resource.h>
40 40 #include <sys/signal.h>
41 41 #include <sys/cred.h>
42 42 #include <sys/user.h>
43 43 #include <sys/buf.h>
44 44 #include <sys/vfs.h>
45 45 #include <sys/vfs_opreg.h>
46 46 #include <sys/stat.h>
47 47 #include <sys/vnode.h>
48 48 #include <sys/mode.h>
49 49 #include <sys/proc.h>
50 50 #include <sys/disp.h>
51 51 #include <sys/file.h>
52 52 #include <sys/fcntl.h>
53 53 #include <sys/flock.h>
54 54 #include <sys/kmem.h>
55 55 #include <sys/uio.h>
56 56 #include <sys/conf.h>
57 57 #include <sys/errno.h>
58 58 #include <sys/mman.h>
59 59 #include <sys/pathname.h>
60 60 #include <sys/debug.h>
61 61 #include <sys/vmsystm.h>
62 62 #include <sys/cmn_err.h>
63 63 #include <sys/fbuf.h>
64 64 #include <sys/dirent.h>
65 65 #include <sys/errno.h>
66 66 #include <sys/dkio.h>
67 67 #include <sys/cmn_err.h>
68 68 #include <sys/atomic.h>
69 69
70 70 #include <vm/hat.h>
71 71 #include <vm/page.h>
72 72 #include <vm/pvn.h>
73 73 #include <vm/as.h>
74 74 #include <vm/seg.h>
75 75 #include <vm/seg_map.h>
76 76 #include <vm/seg_kmem.h>
77 77 #include <vm/seg_vn.h>
78 78 #include <vm/rm.h>
79 79 #include <vm/page.h>
80 80 #include <sys/swap.h>
81 81 #include <sys/avl.h>
82 82 #include <sys/sunldi.h>
83 83 #include <sys/ddi.h>
84 84 #include <sys/sunddi.h>
85 85 #include <sys/sdt.h>
86 86
87 87 /*
88 88 * For struct modlinkage
89 89 */
90 90 #include <sys/modctl.h>
91 91
92 92 #include <sys/fs/hsfs_spec.h>
93 93 #include <sys/fs/hsfs_node.h>
94 94 #include <sys/fs/hsfs_impl.h>
95 95 #include <sys/fs/hsfs_susp.h>
96 96 #include <sys/fs/hsfs_rrip.h>
97 97
98 98 #include <fs/fs_subr.h>
99 99
100 100 /* # of contiguous requests to detect sequential access pattern */
101 101 static int seq_contig_requests = 2;
102 102
103 103 /*
104 104 	 * This is the max number of taskq threads that will be created
105 105 	 * if required. Since we are using a Dynamic TaskQ, by default only
106 106 * one thread is created initially.
107 107 *
108 108 * NOTE: In the usual hsfs use case this per fs instance number
109 109 * of taskq threads should not place any undue load on a system.
110 110 * Even on an unusual system with say 100 CDROM drives, 800 threads
111 111 * will not be created unless all the drives are loaded and all
112 112 	 * of them are saturated with I/O at the same time! If there is
113 113 	 * ever a complaint of system load due to such an unusual case, it
114 114 * should be easy enough to change to one per-machine Dynamic TaskQ
115 115 * for all hsfs mounts with a nthreads of say 32.
116 116 */
117 117 static int hsfs_taskq_nthreads = 8; /* # of taskq threads per fs */
118 118
119 119 /* Min count of adjacent bufs that will avoid buf coalescing */
120 120 static int hsched_coalesce_min = 2;
121 121
122 122 /*
123 123 * Kmem caches for heavily used small allocations. Using these kmem
124 124 * caches provides a factor of 3 reduction in system time and greatly
125 125 * aids overall throughput esp. on SPARC.
126 126 */
127 127 struct kmem_cache *hio_cache;
128 128 struct kmem_cache *hio_info_cache;
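/*
 * Illustrative sketch only (the creation itself is not part of this
 * file): these caches are presumably set up once at module init time,
 * along the lines of
 *
 *	hio_cache = kmem_cache_create("hsfs_hio_cache",
 *	    sizeof (struct hio), 0, NULL, NULL, NULL, NULL, NULL, 0);
 *	hio_info_cache = kmem_cache_create("hsfs_hio_info_cache",
 *	    sizeof (struct hio_info), 0, NULL, NULL, NULL, NULL, NULL, 0);
 *
 * with objects returned via kmem_cache_free(), as hsfs_ra_task() does
 * below for hio_info_cache.
 */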
129 129
130 130 /*
131 131 * This tunable allows us to ignore inode numbers from rrip-1.12.
132 132 * In this case, we fall back to our default inode algorithm.
133 133 */
134 134 extern int use_rrip_inodes;
135 135
136 136 /*
137 137 * Free behind logic from UFS to tame our thirst for
138 138 * the page cache.
139 139 * See usr/src/uts/common/fs/ufs/ufs_vnops.c for more
140 140 * explanation.
141 141 */
142 142 static int freebehind = 1;
143 143 static int smallfile = 0;
144 144 static int cache_read_ahead = 0;
145 145 static u_offset_t smallfile64 = 32 * 1024;
146 146 #define SMALLFILE1_D 1000
147 147 #define SMALLFILE2_D 10
148 148 static u_offset_t smallfile1 = 32 * 1024;
149 149 static u_offset_t smallfile2 = 32 * 1024;
150 150 static clock_t smallfile_update = 0; /* when to recompute */
151 151 static uint_t smallfile1_d = SMALLFILE1_D;
152 152 static uint_t smallfile2_d = SMALLFILE2_D;
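/*
 * Worked example (illustrative numbers): with ptob(freemem) at 4 GB
 * and 4 CPUs online, percpufreeb in hsfs_read() comes to 1 GB, so
 * smallfile1 recomputes to ~1 MB (1 GB / SMALLFILE1_D) and smallfile2
 * to ~100 MB (1 GB / SMALLFILE2_D), floored at the 32 KB defaults
 * above; free-behind reads past smallfile2 then also get SM_DONTNEED.
 */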
153 153
154 154 static int hsched_deadline_compare(const void *x1, const void *x2);
155 155 static int hsched_offset_compare(const void *x1, const void *x2);
156 156 static void hsched_enqueue_io(struct hsfs *fsp, struct hio *hsio, int ra);
157 157 int hsched_invoke_strategy(struct hsfs *fsp);
158 158
159 159 /* ARGSUSED */
160 160 static int
161 161 hsfs_fsync(vnode_t *cp, int syncflag, cred_t *cred, caller_context_t *ct)
162 162 {
163 163 return (0);
164 164 }
165 165
166 166
167 167 /*ARGSUSED*/
168 168 static int
169 169 hsfs_read(struct vnode *vp, struct uio *uiop, int ioflag, struct cred *cred,
170 170 struct caller_context *ct)
171 171 {
172 172 caddr_t base;
173 173 offset_t diff;
174 174 int error;
175 175 struct hsnode *hp;
176 176 uint_t filesize;
177 177 int dofree;
178 178
179 179 hp = VTOH(vp);
180 180 /*
181 181 * if vp is of type VDIR, make sure dirent
182 182 * is filled up with all info (because of ptbl)
183 183 */
184 184 if (vp->v_type == VDIR) {
185 185 if (hp->hs_dirent.ext_size == 0)
186 186 hs_filldirent(vp, &hp->hs_dirent);
187 187 }
188 188 filesize = hp->hs_dirent.ext_size;
189 189
190 190 /* Sanity checks. */
191 191 if (uiop->uio_resid == 0 || /* No data wanted. */
192 192 uiop->uio_loffset > HS_MAXFILEOFF || /* Offset too big. */
193 193 uiop->uio_loffset >= filesize) /* Past EOF. */
194 194 return (0);
195 195
196 196 do {
197 197 /*
198 198 * We want to ask for only the "right" amount of data.
199 199 		 * In this case that means:
200 200 *
201 201 * We can't get data from beyond our EOF. If asked,
202 202 * we will give a short read.
203 203 *
204 204 * segmap_getmapflt returns buffers of MAXBSIZE bytes.
205 205 * These buffers are always MAXBSIZE aligned.
206 206 * If our starting offset is not MAXBSIZE aligned,
207 207 * we can only ask for less than MAXBSIZE bytes.
208 208 *
209 209 * If our requested offset and length are such that
210 210 * they belong in different MAXBSIZE aligned slots
211 211 * then we'll be making more than one call on
212 212 * segmap_getmapflt.
213 213 *
214 214 * This diagram shows the variables we use and their
215 215 * relationships.
216 216 *
217 217 * |<-----MAXBSIZE----->|
218 218 * +--------------------------...+
219 219 * |.....mapon->|<--n-->|....*...|EOF
220 220 * +--------------------------...+
221 221 * uio_loffset->|
222 222 * uio_resid....|<---------->|
223 223 * diff.........|<-------------->|
224 224 *
225 225 * So, in this case our offset is not aligned
226 226 * and our request takes us outside of the
227 227 * MAXBSIZE window. We will break this up into
228 228 * two segmap_getmapflt calls.
229 229 */
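		/*
		 * Concrete example (illustrative numbers, assuming
		 * MAXBSIZE is 8192): for uio_loffset = 6000,
		 * uio_resid = 5000 and filesize = 20000 we get
		 * mapon = 6000, diff = 14000 and
		 * n = MIN(8192 - 6000, 5000) = 2192, so the first
		 * pass maps bytes 6000..8191; the loop then comes
		 * around with uio_loffset = 8192, mapon = 0 and
		 * moves the remaining 2808 bytes from the next
		 * MAXBSIZE window.
		 */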
230 230 size_t nbytes;
231 231 offset_t mapon;
232 232 size_t n;
233 233 uint_t flags;
234 234
235 235 mapon = uiop->uio_loffset & MAXBOFFSET;
236 236 diff = filesize - uiop->uio_loffset;
237 237 nbytes = (size_t)MIN(MAXBSIZE - mapon, uiop->uio_resid);
238 238 n = MIN(diff, nbytes);
239 239 if (n <= 0) {
240 240 /* EOF or request satisfied. */
241 241 return (0);
242 242 }
243 243
244 244 /*
245 245 * Freebehind computation taken from:
246 246 * usr/src/uts/common/fs/ufs/ufs_vnops.c
247 247 */
248 248 if (drv_hztousec(ddi_get_lbolt()) >= smallfile_update) {
249 249 uint64_t percpufreeb;
250 250 if (smallfile1_d == 0) smallfile1_d = SMALLFILE1_D;
251 251 if (smallfile2_d == 0) smallfile2_d = SMALLFILE2_D;
252 252 percpufreeb = ptob((uint64_t)freemem) / ncpus_online;
253 253 smallfile1 = percpufreeb / smallfile1_d;
254 254 smallfile2 = percpufreeb / smallfile2_d;
255 255 smallfile1 = MAX(smallfile1, smallfile);
256 256 smallfile1 = MAX(smallfile1, smallfile64);
257 257 smallfile2 = MAX(smallfile1, smallfile2);
258 258 smallfile_update = drv_hztousec(ddi_get_lbolt())
259 259 + 1000000;
260 260 }
261 261
262 262 dofree = freebehind &&
263 263 hp->hs_prev_offset == uiop->uio_loffset &&
264 264 hp->hs_ra_bytes > 0;
265 265
266 266 base = segmap_getmapflt(segkmap, vp,
267 267 (u_offset_t)uiop->uio_loffset, n, 1, S_READ);
268 268
269 269 error = uiomove(base + mapon, n, UIO_READ, uiop);
270 270
271 271 if (error == 0) {
272 272 /*
273 273 * if read a whole block, or read to eof,
274 274 * won't need this buffer again soon.
275 275 */
276 276 if (n + mapon == MAXBSIZE ||
277 277 uiop->uio_loffset == filesize)
278 278 flags = SM_DONTNEED;
279 279 else
280 280 flags = 0;
281 281
282 282 if (dofree) {
283 283 flags = SM_FREE | SM_ASYNC;
284 284 if ((cache_read_ahead == 0) &&
285 285 uiop->uio_loffset > smallfile2)
286 286 flags |= SM_DONTNEED;
287 287 }
288 288
289 289 error = segmap_release(segkmap, base, flags);
290 290 } else
291 291 (void) segmap_release(segkmap, base, 0);
292 292 } while (error == 0 && uiop->uio_resid > 0);
293 293
294 294 return (error);
295 295 }
296 296
297 297 /*ARGSUSED2*/
298 298 static int
299 299 hsfs_getattr(struct vnode *vp, struct vattr *vap, int flags, struct cred *cred,
300 300 caller_context_t *ct)
301 301 {
302 302 struct hsnode *hp;
303 303 struct vfs *vfsp;
304 304 struct hsfs *fsp;
305 305
306 306 hp = VTOH(vp);
307 307 fsp = VFS_TO_HSFS(vp->v_vfsp);
308 308 vfsp = vp->v_vfsp;
309 309
310 310 if ((hp->hs_dirent.ext_size == 0) && (vp->v_type == VDIR)) {
311 311 hs_filldirent(vp, &hp->hs_dirent);
312 312 }
313 313 vap->va_type = IFTOVT(hp->hs_dirent.mode);
314 314 vap->va_mode = hp->hs_dirent.mode;
315 315 vap->va_uid = hp->hs_dirent.uid;
316 316 vap->va_gid = hp->hs_dirent.gid;
317 317
318 318 vap->va_fsid = vfsp->vfs_dev;
319 319 vap->va_nodeid = (ino64_t)hp->hs_nodeid;
320 320 vap->va_nlink = hp->hs_dirent.nlink;
321 321 vap->va_size = (offset_t)hp->hs_dirent.ext_size;
322 322
323 323 vap->va_atime.tv_sec = hp->hs_dirent.adate.tv_sec;
324 324 vap->va_atime.tv_nsec = hp->hs_dirent.adate.tv_usec*1000;
325 325 vap->va_mtime.tv_sec = hp->hs_dirent.mdate.tv_sec;
326 326 vap->va_mtime.tv_nsec = hp->hs_dirent.mdate.tv_usec*1000;
327 327 vap->va_ctime.tv_sec = hp->hs_dirent.cdate.tv_sec;
328 328 vap->va_ctime.tv_nsec = hp->hs_dirent.cdate.tv_usec*1000;
329 329 if (vp->v_type == VCHR || vp->v_type == VBLK)
330 330 vap->va_rdev = hp->hs_dirent.r_dev;
331 331 else
332 332 vap->va_rdev = 0;
333 333 vap->va_blksize = vfsp->vfs_bsize;
334 334 /* no. of blocks = no. of data blocks + no. of xar blocks */
335 335 vap->va_nblocks = (fsblkcnt64_t)howmany(vap->va_size + (u_longlong_t)
336 336 (hp->hs_dirent.xar_len << fsp->hsfs_vol.lbn_shift), DEV_BSIZE);
337 337 vap->va_seq = hp->hs_seq;
338 338 return (0);
339 339 }
340 340
341 341 /*ARGSUSED*/
342 342 static int
343 343 hsfs_readlink(struct vnode *vp, struct uio *uiop, struct cred *cred,
344 344 caller_context_t *ct)
345 345 {
346 346 struct hsnode *hp;
347 347
348 348 if (vp->v_type != VLNK)
349 349 return (EINVAL);
350 350
351 351 hp = VTOH(vp);
352 352
353 353 if (hp->hs_dirent.sym_link == (char *)NULL)
354 354 return (ENOENT);
355 355
356 356 return (uiomove(hp->hs_dirent.sym_link,
357 357 (size_t)MIN(hp->hs_dirent.ext_size,
358 358 uiop->uio_resid), UIO_READ, uiop));
359 359 }
360 360
361 361 /*ARGSUSED*/
362 362 static void
363 363 hsfs_inactive(struct vnode *vp, struct cred *cred, caller_context_t *ct)
364 364 {
365 365 struct hsnode *hp;
366 366 struct hsfs *fsp;
367 367
368 368 int nopage;
369 369
370 370 hp = VTOH(vp);
371 371 fsp = VFS_TO_HSFS(vp->v_vfsp);
372 372 /*
373 373 * Note: acquiring and holding v_lock for quite a while
374 374 * here serializes on the vnode; this is unfortunate, but
375 375 * likely not to overly impact performance, as the underlying
376 376 * device (CDROM drive) is quite slow.
377 377 */
378 378 rw_enter(&fsp->hsfs_hash_lock, RW_WRITER);
379 379 mutex_enter(&hp->hs_contents_lock);
380 380 mutex_enter(&vp->v_lock);
381 381
382 382 if (vp->v_count < 1) {
383 383 panic("hsfs_inactive: v_count < 1");
384 384 /*NOTREACHED*/
385 385 }
386 386
387 387 VN_RELE_LOCKED(vp);
388 388 if (vp->v_count > 0 || (hp->hs_flags & HREF) == 0) {
389 389 mutex_exit(&vp->v_lock);
390 390 mutex_exit(&hp->hs_contents_lock);
391 391 rw_exit(&fsp->hsfs_hash_lock);
392 392 return;
393 393 }
394 394 if (vp->v_count == 0) {
395 395 /*
396 396 * Free the hsnode.
397 397 * If there are no pages associated with the
398 398 * hsnode, give it back to the kmem_cache,
399 399 * else put at the end of this file system's
400 400 * internal free list.
401 401 */
402 402 nopage = !vn_has_cached_data(vp);
403 403 hp->hs_flags = 0;
404 404 /*
405 405 * exit these locks now, since hs_freenode may
406 406 * kmem_free the hsnode and embedded vnode
407 407 */
408 408 mutex_exit(&vp->v_lock);
409 409 mutex_exit(&hp->hs_contents_lock);
410 410 hs_freenode(vp, fsp, nopage);
411 411 } else {
412 412 mutex_exit(&vp->v_lock);
413 413 mutex_exit(&hp->hs_contents_lock);
414 414 }
415 415 rw_exit(&fsp->hsfs_hash_lock);
416 416 }
417 417
418 418
419 419 /*ARGSUSED*/
420 420 static int
421 421 hsfs_lookup(struct vnode *dvp, char *nm, struct vnode **vpp,
422 422 struct pathname *pnp, int flags, struct vnode *rdir, struct cred *cred,
423 423 caller_context_t *ct, int *direntflags, pathname_t *realpnp)
424 424 {
425 425 int error;
426 426 int namelen = (int)strlen(nm);
427 427
428 428 if (*nm == '\0') {
429 429 VN_HOLD(dvp);
430 430 *vpp = dvp;
431 431 return (0);
432 432 }
433 433
434 434 /*
435 435 * If we're looking for ourself, life is simple.
436 436 */
437 437 if (namelen == 1 && *nm == '.') {
438 438 if (error = hs_access(dvp, (mode_t)VEXEC, cred))
439 439 return (error);
440 440 VN_HOLD(dvp);
441 441 *vpp = dvp;
442 442 return (0);
443 443 }
444 444
445 445 return (hs_dirlook(dvp, nm, namelen, vpp, cred));
446 446 }
447 447
448 448
449 449 /*ARGSUSED*/
450 450 static int
451 451 hsfs_readdir(struct vnode *vp, struct uio *uiop, struct cred *cred, int *eofp,
452 452 caller_context_t *ct, int flags)
453 453 {
454 454 struct hsnode *dhp;
455 455 struct hsfs *fsp;
456 456 struct hs_direntry hd;
457 457 struct dirent64 *nd;
458 458 int error;
459 459 uint_t offset; /* real offset in directory */
460 460 uint_t dirsiz; /* real size of directory */
461 461 uchar_t *blkp;
462 462 int hdlen; /* length of hs directory entry */
463 463 long ndlen; /* length of dirent entry */
464 464 int bytes_wanted;
465 465 size_t bufsize; /* size of dirent buffer */
466 466 char *outbuf; /* ptr to dirent buffer */
467 467 char *dname;
468 468 int dnamelen;
469 469 size_t dname_size;
470 470 struct fbuf *fbp;
471 471 uint_t last_offset; /* last index into current dir block */
472 472 ino64_t dirino; /* temporary storage before storing in dirent */
473 473 off_t diroff;
474 474
475 475 dhp = VTOH(vp);
476 476 fsp = VFS_TO_HSFS(vp->v_vfsp);
477 477 if (dhp->hs_dirent.ext_size == 0)
478 478 hs_filldirent(vp, &dhp->hs_dirent);
479 479 dirsiz = dhp->hs_dirent.ext_size;
480 480 if (uiop->uio_loffset >= dirsiz) { /* at or beyond EOF */
481 481 if (eofp)
482 482 *eofp = 1;
483 483 return (0);
484 484 }
485 485 ASSERT(uiop->uio_loffset <= HS_MAXFILEOFF);
486 486 offset = uiop->uio_loffset;
487 487
488 488 dname_size = fsp->hsfs_namemax + 1; /* 1 for the ending NUL */
489 489 dname = kmem_alloc(dname_size, KM_SLEEP);
490 490 bufsize = uiop->uio_resid + sizeof (struct dirent64);
491 491
492 492 outbuf = kmem_alloc(bufsize, KM_SLEEP);
493 493 nd = (struct dirent64 *)outbuf;
494 494
495 495 while (offset < dirsiz) {
496 496 bytes_wanted = MIN(MAXBSIZE, dirsiz - (offset & MAXBMASK));
497 497
498 498 error = fbread(vp, (offset_t)(offset & MAXBMASK),
499 499 (unsigned int)bytes_wanted, S_READ, &fbp);
500 500 if (error)
501 501 goto done;
502 502
503 503 blkp = (uchar_t *)fbp->fb_addr;
504 504 last_offset = (offset & MAXBMASK) + fbp->fb_count;
505 505
506 506 #define rel_offset(offset) ((offset) & MAXBOFFSET) /* index into blkp */
507 507
508 508 while (offset < last_offset) {
509 509 /*
510 510 * Very similar validation code is found in
511 511 * process_dirblock(), hsfs_node.c.
512 512 * For an explanation, see there.
513 513 * It may make sense for the future to
514 514 * "consolidate" the code in hs_parsedir(),
515 515 * process_dirblock() and hsfs_readdir() into
516 516 * a single utility function.
517 517 */
518 518 hdlen = (int)((uchar_t)
519 519 HDE_DIR_LEN(&blkp[rel_offset(offset)]));
520 520 if (hdlen < HDE_ROOT_DIR_REC_SIZE ||
521 521 offset + hdlen > last_offset) {
522 522 /*
523 523 * advance to next sector boundary
524 524 */
525 525 offset = roundup(offset + 1, HS_SECTOR_SIZE);
526 526 if (hdlen)
527 527 hs_log_bogus_disk_warning(fsp,
528 528 HSFS_ERR_TRAILING_JUNK, 0);
529 529
530 530 continue;
531 531 }
532 532
533 533 bzero(&hd, sizeof (hd));
534 534
535 535 /*
536 536 * Just ignore invalid directory entries.
537 537 * XXX - maybe hs_parsedir() will detect EXISTENCE bit
538 538 */
539 539 if (!hs_parsedir(fsp, &blkp[rel_offset(offset)],
540 540 &hd, dname, &dnamelen, last_offset - offset)) {
541 541 /*
542 542 * Determine if there is enough room
543 543 */
544 544 ndlen = (long)DIRENT64_RECLEN((dnamelen));
545 545
546 546 if ((ndlen + ((char *)nd - outbuf)) >
547 547 uiop->uio_resid) {
548 548 fbrelse(fbp, S_READ);
549 549 goto done; /* output buffer full */
550 550 }
551 551
552 552 diroff = offset + hdlen;
553 553 /*
554 554 * If the media carries rrip-v1.12 or newer,
555 555 * and we trust the inodes from the rrip data
556 556 * (use_rrip_inodes != 0), use that data. If the
557 557 * media has been created by a recent mkisofs
558 558 * version, we may trust all numbers in the
559 559 * starting extent number; otherwise, we cannot
560 560 * do this for zero sized files and symlinks,
561 561 * because if we did we'd end up mapping all of
562 562 * them to the same node. We use HS_DUMMY_INO
563 563 * in this case and make sure that we will not
564 564 * map all files to the same meta data.
565 565 */
566 566 if (hd.inode != 0 && use_rrip_inodes) {
567 567 dirino = hd.inode;
568 568 } else if ((hd.ext_size == 0 ||
569 569 hd.sym_link != (char *)NULL) &&
570 570 (fsp->hsfs_flags & HSFSMNT_INODE) == 0) {
571 571 dirino = HS_DUMMY_INO;
572 572 } else {
573 573 dirino = hd.ext_lbn;
574 574 }
575 575
576 576 /* strncpy(9f) will zero uninitialized bytes */
577 577
578 578 ASSERT(strlen(dname) + 1 <=
579 579 DIRENT64_NAMELEN(ndlen));
580 580 (void) strncpy(nd->d_name, dname,
581 581 DIRENT64_NAMELEN(ndlen));
582 582 nd->d_reclen = (ushort_t)ndlen;
583 583 nd->d_off = (offset_t)diroff;
584 584 nd->d_ino = dirino;
585 585 nd = (struct dirent64 *)((char *)nd + ndlen);
586 586
587 587 /*
588 588 * free up space allocated for symlink
589 589 */
590 590 if (hd.sym_link != (char *)NULL) {
591 591 kmem_free(hd.sym_link,
592 592 (size_t)(hd.ext_size+1));
593 593 hd.sym_link = (char *)NULL;
594 594 }
595 595 }
596 596 offset += hdlen;
597 597 }
598 598 fbrelse(fbp, S_READ);
599 599 }
600 600
601 601 /*
602 602 * Got here for one of the following reasons:
603 603 * 1) outbuf is full (error == 0)
604 604 * 2) end of directory reached (error == 0)
605 605 * 3) error reading directory sector (error != 0)
606 606 * 4) directory entry crosses sector boundary (error == 0)
607 607 *
608 608 * If any directory entries have been copied, don't report
609 609 * case 4. Instead, return the valid directory entries.
610 610 *
611 611 * If no entries have been copied, report the error.
612 612 	 * If case 4, this will be indistinguishable from EOF.
613 613 */
614 614 done:
615 615 ndlen = ((char *)nd - outbuf);
616 616 if (ndlen != 0) {
617 617 error = uiomove(outbuf, (size_t)ndlen, UIO_READ, uiop);
618 618 uiop->uio_loffset = offset;
619 619 }
620 620 kmem_free(dname, dname_size);
621 621 kmem_free(outbuf, bufsize);
622 622 if (eofp && error == 0)
623 623 *eofp = (uiop->uio_loffset >= dirsiz);
624 624 return (error);
625 625 }
626 626
627 627 /*ARGSUSED2*/
628 628 static int
629 629 hsfs_fid(struct vnode *vp, struct fid *fidp, caller_context_t *ct)
630 630 {
631 631 struct hsnode *hp;
632 632 struct hsfid *fid;
633 633
634 634 if (fidp->fid_len < (sizeof (*fid) - sizeof (fid->hf_len))) {
635 635 fidp->fid_len = sizeof (*fid) - sizeof (fid->hf_len);
636 636 return (ENOSPC);
637 637 }
638 638
639 639 fid = (struct hsfid *)fidp;
640 640 fid->hf_len = sizeof (*fid) - sizeof (fid->hf_len);
641 641 hp = VTOH(vp);
642 642 mutex_enter(&hp->hs_contents_lock);
643 643 fid->hf_dir_lbn = hp->hs_dir_lbn;
644 644 fid->hf_dir_off = (ushort_t)hp->hs_dir_off;
645 645 fid->hf_ino = hp->hs_nodeid;
646 646 mutex_exit(&hp->hs_contents_lock);
647 647 return (0);
648 648 }
649 649
650 650 /*ARGSUSED*/
651 651 static int
652 652 hsfs_open(struct vnode **vpp, int flag, struct cred *cred, caller_context_t *ct)
653 653 {
654 654 return (0);
655 655 }
656 656
657 657 /*ARGSUSED*/
658 658 static int
659 659 hsfs_close(struct vnode *vp, int flag, int count, offset_t offset,
660 660 struct cred *cred, caller_context_t *ct)
661 661 {
662 662 (void) cleanlocks(vp, ttoproc(curthread)->p_pid, 0);
663 663 cleanshares(vp, ttoproc(curthread)->p_pid);
664 664 return (0);
665 665 }
666 666
667 667 /*ARGSUSED2*/
668 668 static int
669 669 hsfs_access(struct vnode *vp, int mode, int flags, cred_t *cred,
670 670 caller_context_t *ct)
671 671 {
672 672 return (hs_access(vp, (mode_t)mode, cred));
673 673 }
674 674
675 675 /*
676 676 * the seek time of a CD-ROM is very slow, and data transfer
677 677 * rate is even worse (max. 150K per sec). The design
678 678 * decision is to reduce access to cd-rom as much as possible,
679 679 * and to transfer a sizable block (read-ahead) of data at a time.
680 680 * UFS style of read ahead one block at a time is not appropriate,
681 681 	 * and is not supported.
682 682 */
683 683
684 684 /*
685 685 * KLUSTSIZE should be a multiple of PAGESIZE and <= MAXPHYS.
686 686 */
687 687 #define KLUSTSIZE (56 * 1024)
688 688 /* we don't support read ahead */
689 689 int hsfs_lostpage; /* no. of times we lost original page */
690 690
691 691 /*
692 692 * Used to prevent biodone() from releasing buf resources that
693 693 * we didn't allocate in quite the usual way.
694 694 */
695 695 /*ARGSUSED*/
696 696 int
697 697 hsfs_iodone(struct buf *bp)
698 698 {
699 699 sema_v(&bp->b_io);
700 700 return (0);
701 701 }
702 702
703 703 /*
704 704 * The taskq thread that invokes the scheduling function to ensure
705 705 * that all readaheads are complete and cleans up the associated
706 706 * memory and releases the page lock.
707 707 */
708 708 void
709 709 hsfs_ra_task(void *arg)
710 710 {
711 711 struct hio_info *info = arg;
712 712 uint_t count;
713 713 struct buf *wbuf;
714 714
715 715 ASSERT(info->pp != NULL);
716 716
717 717 for (count = 0; count < info->bufsused; count++) {
718 718 wbuf = &(info->bufs[count]);
719 719
720 720 DTRACE_PROBE1(hsfs_io_wait_ra, struct buf *, wbuf);
721 721 while (sema_tryp(&(info->sema[count])) == 0) {
722 722 if (hsched_invoke_strategy(info->fsp)) {
723 723 sema_p(&(info->sema[count]));
724 724 break;
725 725 }
726 726 }
727 727 sema_destroy(&(info->sema[count]));
728 728 DTRACE_PROBE1(hsfs_io_done_ra, struct buf *, wbuf);
729 729 biofini(&(info->bufs[count]));
730 730 }
731 731 for (count = 0; count < info->bufsused; count++) {
732 732 if (info->vas[count] != NULL) {
733 733 ppmapout(info->vas[count]);
734 734 }
735 735 }
736 736 kmem_free(info->vas, info->bufcnt * sizeof (caddr_t));
737 737 kmem_free(info->bufs, info->bufcnt * sizeof (struct buf));
738 738 kmem_free(info->sema, info->bufcnt * sizeof (ksema_t));
739 739
740 740 pvn_read_done(info->pp, 0);
741 741 kmem_cache_free(hio_info_cache, info);
742 742 }
743 743
744 744 /*
745 745 * Submit asynchronous readahead requests to the I/O scheduler
746 746 * depending on the number of pages to read ahead. These requests
747 747 * are asynchronous to the calling thread but I/O requests issued
748 748 * subsequently by other threads with higher LBNs must wait for
749 749 * these readaheads to complete since we have a single ordered
750 750 * I/O pipeline. Thus these readaheads are semi-asynchronous.
751 751 * A TaskQ handles waiting for the readaheads to complete.
752 752 *
753 753 * This function is mostly a copy of hsfs_getapage but somewhat
754 754 * simpler. A readahead request is aborted if page allocation
755 755 * fails.
756 756 */
757 757 /*ARGSUSED*/
758 758 static int
759 759 hsfs_getpage_ra(struct vnode *vp, u_offset_t off, struct seg *seg,
760 760 caddr_t addr, struct hsnode *hp, struct hsfs *fsp, int xarsiz,
761 761 offset_t bof, int chunk_lbn_count, int chunk_data_bytes)
762 762 {
763 763 struct buf *bufs;
764 764 caddr_t *vas;
765 765 caddr_t va;
766 766 struct page *pp, *searchp, *lastp;
767 767 struct vnode *devvp;
768 768 ulong_t byte_offset;
769 769 size_t io_len_tmp;
770 770 uint_t io_off, io_len;
771 771 uint_t xlen;
772 772 uint_t filsiz;
773 773 uint_t secsize;
774 774 uint_t bufcnt;
775 775 uint_t bufsused;
776 776 uint_t count;
777 777 uint_t io_end;
778 778 uint_t which_chunk_lbn;
779 779 uint_t offset_lbn;
780 780 uint_t offset_extra;
781 781 offset_t offset_bytes;
782 782 uint_t remaining_bytes;
783 783 uint_t extension;
784 784 int remainder; /* must be signed */
785 785 diskaddr_t driver_block;
786 786 u_offset_t io_off_tmp;
787 787 ksema_t *fio_done;
788 788 struct hio_info *info;
789 789 size_t len;
790 790
791 791 ASSERT(fsp->hqueue != NULL);
792 792
793 793 if (addr >= seg->s_base + seg->s_size) {
794 794 return (-1);
795 795 }
796 796
797 797 devvp = fsp->hsfs_devvp;
798 798 secsize = fsp->hsfs_vol.lbn_size; /* bytes per logical block */
799 799
800 800 /* file data size */
801 801 filsiz = hp->hs_dirent.ext_size;
802 802
803 803 if (off >= filsiz)
804 804 return (0);
805 805
806 806 extension = 0;
807 807 pp = NULL;
808 808
809 809 extension += hp->hs_ra_bytes;
810 810
811 811 /*
812 812 * Some CD writers (e.g. Kodak Photo CD writers)
813 813 * create CDs in TAO mode and reserve tracks that
814 814 * are not completely written. Some sectors remain
815 815 * unreadable for this reason and give I/O errors.
816 816 * Also, there's no point in reading sectors
817 817 * we'll never look at. So, if we're asked to go
818 818 * beyond the end of a file, truncate to the length
819 819 * of that file.
820 820 *
821 821 * Additionally, this behaviour is required by section
822 822 * 6.4.5 of ISO 9660:1988(E).
823 823 */
824 824 len = MIN(extension ? extension : PAGESIZE, filsiz - off);
825 825
826 826 /* A little paranoia */
827 827 if (len <= 0)
828 828 return (-1);
829 829
830 830 /*
831 831 * After all that, make sure we're asking for things in units
832 832 * that bdev_strategy() will understand (see bug 4202551).
833 833 */
834 834 len = roundup(len, DEV_BSIZE);
835 835
836 836 pp = pvn_read_kluster(vp, off, seg, addr, &io_off_tmp,
837 837 &io_len_tmp, off, len, 1);
838 838
839 839 if (pp == NULL) {
840 840 hp->hs_num_contig = 0;
841 841 hp->hs_ra_bytes = 0;
842 842 hp->hs_prev_offset = 0;
843 843 return (-1);
844 844 }
845 845
846 846 io_off = (uint_t)io_off_tmp;
847 847 io_len = (uint_t)io_len_tmp;
848 848
849 849 /* check for truncation */
850 850 /*
851 851 * xxx Clean up and return EIO instead?
852 852 * xxx Ought to go to u_offset_t for everything, but we
853 853 * xxx call lots of things that want uint_t arguments.
854 854 */
855 855 ASSERT(io_off == io_off_tmp);
856 856
857 857 /*
858 858 * get enough buffers for worst-case scenario
859 859 * (i.e., no coalescing possible).
860 860 */
861 861 bufcnt = (len + secsize - 1) / secsize;
862 862 bufs = kmem_alloc(bufcnt * sizeof (struct buf), KM_SLEEP);
863 863 vas = kmem_alloc(bufcnt * sizeof (caddr_t), KM_SLEEP);
864 864
865 865 /*
866 866 	 * Allocate an array of semaphores since we are doing I/O
867 867 * scheduling.
868 868 */
869 869 fio_done = kmem_alloc(bufcnt * sizeof (ksema_t), KM_SLEEP);
870 870
871 871 /*
872 872 * If our filesize is not an integer multiple of PAGESIZE,
873 873 * we zero that part of the last page that's between EOF and
874 874 * the PAGESIZE boundary.
875 875 */
876 876 xlen = io_len & PAGEOFFSET;
877 877 if (xlen != 0)
878 878 pagezero(pp->p_prev, xlen, PAGESIZE - xlen);
879 879
880 880 DTRACE_PROBE2(hsfs_readahead, struct vnode *, vp, uint_t, io_len);
881 881
882 882 va = NULL;
883 883 lastp = NULL;
884 884 searchp = pp;
885 885 io_end = io_off + io_len;
886 886 for (count = 0, byte_offset = io_off;
887 887 byte_offset < io_end;
888 888 count++) {
889 889 ASSERT(count < bufcnt);
890 890
891 891 bioinit(&bufs[count]);
892 892 bufs[count].b_edev = devvp->v_rdev;
893 893 bufs[count].b_dev = cmpdev(devvp->v_rdev);
894 894 bufs[count].b_flags = B_NOCACHE|B_BUSY|B_READ;
895 895 bufs[count].b_iodone = hsfs_iodone;
896 896 bufs[count].b_vp = vp;
897 897 bufs[count].b_file = vp;
898 898
899 899 /* Compute disk address for interleaving. */
900 900
901 901 /* considered without skips */
902 902 which_chunk_lbn = byte_offset / chunk_data_bytes;
903 903
904 904 /* factor in skips */
905 905 offset_lbn = which_chunk_lbn * chunk_lbn_count;
906 906
907 907 /* convert to physical byte offset for lbn */
908 908 offset_bytes = LBN_TO_BYTE(offset_lbn, vp->v_vfsp);
909 909
910 910 /* don't forget offset into lbn */
911 911 offset_extra = byte_offset % chunk_data_bytes;
912 912
913 913 /* get virtual block number for driver */
914 914 driver_block = lbtodb(bof + xarsiz
915 915 + offset_bytes + offset_extra);
916 916
917 917 if (lastp != searchp) {
918 918 /* this branch taken first time through loop */
919 919 va = vas[count] = ppmapin(searchp, PROT_WRITE,
920 920 (caddr_t)-1);
921 921 /* ppmapin() guarantees not to return NULL */
922 922 } else {
923 923 vas[count] = NULL;
924 924 }
925 925
926 926 bufs[count].b_un.b_addr = va + byte_offset % PAGESIZE;
927 927 bufs[count].b_offset =
928 928 (offset_t)(byte_offset - io_off + off);
929 929
930 930 /*
931 931 * We specifically use the b_lblkno member here
932 932 * as even in the 32 bit world driver_block can
933 933 * get very large in line with the ISO9660 spec.
934 934 */
935 935
936 936 bufs[count].b_lblkno = driver_block;
937 937
938 938 remaining_bytes = ((which_chunk_lbn + 1) * chunk_data_bytes)
939 939 - byte_offset;
940 940
941 941 /*
942 942 * remaining_bytes can't be zero, as we derived
943 943 * which_chunk_lbn directly from byte_offset.
944 944 */
945 945 if ((remaining_bytes + byte_offset) < (off + len)) {
946 946 /* coalesce-read the rest of the chunk */
947 947 bufs[count].b_bcount = remaining_bytes;
948 948 } else {
949 949 /* get the final bits */
950 950 bufs[count].b_bcount = off + len - byte_offset;
951 951 }
952 952
953 953 remainder = PAGESIZE - (byte_offset % PAGESIZE);
954 954 if (bufs[count].b_bcount > remainder) {
955 955 bufs[count].b_bcount = remainder;
956 956 }
957 957
958 958 bufs[count].b_bufsize = bufs[count].b_bcount;
959 959 if (((offset_t)byte_offset + bufs[count].b_bcount) >
960 960 HS_MAXFILEOFF) {
961 961 break;
962 962 }
963 963 byte_offset += bufs[count].b_bcount;
964 964
965 965 /*
966 966 * We are scheduling I/O so we need to enqueue
967 967 * requests rather than calling bdev_strategy
968 968 * here. A later invocation of the scheduling
969 969 * function will take care of doing the actual
970 970 * I/O as it selects requests from the queue as
971 971 * per the scheduling logic.
972 972 */
973 973 struct hio *hsio = kmem_cache_alloc(hio_cache,
974 974 KM_SLEEP);
975 975
976 976 sema_init(&fio_done[count], 0, NULL,
977 977 SEMA_DEFAULT, NULL);
978 978 hsio->bp = &bufs[count];
979 979 hsio->sema = &fio_done[count];
980 980 hsio->io_lblkno = bufs[count].b_lblkno;
981 981 hsio->nblocks = howmany(hsio->bp->b_bcount,
982 982 DEV_BSIZE);
983 983
984 984 /* used for deadline */
985 985 hsio->io_timestamp = drv_hztousec(ddi_get_lbolt());
986 986
987 987 /* for I/O coalescing */
988 988 hsio->contig_chain = NULL;
989 989 hsched_enqueue_io(fsp, hsio, 1);
990 990
991 991 lwp_stat_update(LWP_STAT_INBLK, 1);
992 992 lastp = searchp;
993 993 if ((remainder - bufs[count].b_bcount) < 1) {
994 994 searchp = searchp->p_next;
995 995 }
996 996 }
997 997
998 998 bufsused = count;
999 999 info = kmem_cache_alloc(hio_info_cache, KM_SLEEP);
1000 1000 info->bufs = bufs;
1001 1001 info->vas = vas;
1002 1002 info->sema = fio_done;
1003 1003 info->bufsused = bufsused;
1004 1004 info->bufcnt = bufcnt;
1005 1005 info->fsp = fsp;
1006 1006 info->pp = pp;
1007 1007
1008 1008 (void) taskq_dispatch(fsp->hqueue->ra_task,
1009 1009 hsfs_ra_task, info, KM_SLEEP);
1010 1010 /*
1011 1011 * The I/O locked pages are unlocked in our taskq thread.
1012 1012 */
1013 1013 return (0);
1014 1014 }
1015 1015
1016 1016 /*
1017 1017 * Each file may have a different interleaving on disk. This makes
1018 1018 * things somewhat interesting. The gist is that there are some
1019 1019 * number of contiguous data sectors, followed by some other number
1020 1020 * of contiguous skip sectors. The sum of those two sets of sectors
1021 1021 * defines the interleave size. Unfortunately, it means that we generally
1022 1022 * can't simply read N sectors starting at a given offset to satisfy
1023 1023 * any given request.
1024 1024 *
1025 1025 * What we do is get the relevant memory pages via pvn_read_kluster(),
1026 1026 * then stride through the interleaves, setting up a buf for each
1027 1027 * sector that needs to be brought in. Instead of kmem_alloc'ing
1028 1028 * space for the sectors, though, we just point at the appropriate
1029 1029 * spot in the relevant page for each of them. This saves us a bunch
1030 1030 * of copying.
1031 1031 *
1032 1032 * NOTICE: The code below in hsfs_getapage is mostly same as the code
1033 1033 * in hsfs_getpage_ra above (with some omissions). If you are
1034 1034 * making any change to this function, please also look at
1035 1035 * hsfs_getpage_ra.
1036 1036 */
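/*
 * Worked example of the address arithmetic below (illustrative
 * numbers, assuming a 2048-byte logical block): with intlf_sz = 4
 * and intlf_sk = 2, chunk_data_bytes = 8192 and chunk_lbn_count = 6.
 * For byte_offset = 20000: which_chunk_lbn = 20000 / 8192 = 2,
 * offset_lbn = 2 * 6 = 12, offset_bytes = 12 * 2048 = 24576,
 * offset_extra = 20000 % 8192 = 3616, so the buf targets
 * lbtodb(bof + xarsiz + 24576 + 3616).
 */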
1037 1037 /*ARGSUSED*/
1038 1038 static int
1039 1039 hsfs_getapage(struct vnode *vp, u_offset_t off, size_t len, uint_t *protp,
1040 1040 struct page *pl[], size_t plsz, struct seg *seg, caddr_t addr,
1041 1041 enum seg_rw rw, struct cred *cred)
1042 1042 {
1043 1043 struct hsnode *hp;
1044 1044 struct hsfs *fsp;
1045 1045 int err;
1046 1046 struct buf *bufs;
1047 1047 caddr_t *vas;
1048 1048 caddr_t va;
1049 1049 struct page *pp, *searchp, *lastp;
1050 1050 page_t *pagefound;
1051 1051 offset_t bof;
1052 1052 struct vnode *devvp;
1053 1053 ulong_t byte_offset;
1054 1054 size_t io_len_tmp;
1055 1055 uint_t io_off, io_len;
1056 1056 uint_t xlen;
1057 1057 uint_t filsiz;
1058 1058 uint_t secsize;
1059 1059 uint_t bufcnt;
1060 1060 uint_t bufsused;
1061 1061 uint_t count;
1062 1062 uint_t io_end;
1063 1063 uint_t which_chunk_lbn;
1064 1064 uint_t offset_lbn;
1065 1065 uint_t offset_extra;
1066 1066 offset_t offset_bytes;
1067 1067 uint_t remaining_bytes;
1068 1068 uint_t extension;
1069 1069 int remainder; /* must be signed */
1070 1070 int chunk_lbn_count;
1071 1071 int chunk_data_bytes;
1072 1072 int xarsiz;
1073 1073 diskaddr_t driver_block;
1074 1074 u_offset_t io_off_tmp;
1075 1075 ksema_t *fio_done;
1076 1076 int calcdone;
1077 1077
1078 1078 /*
1079 1079 * We don't support asynchronous operation at the moment, so
1080 1080 * just pretend we did it. If the pages are ever actually
1081 1081 * needed, they'll get brought in then.
1082 1082 */
1083 1083 if (pl == NULL)
1084 1084 return (0);
1085 1085
1086 1086 hp = VTOH(vp);
1087 1087 fsp = VFS_TO_HSFS(vp->v_vfsp);
1088 1088 devvp = fsp->hsfs_devvp;
1089 1089 secsize = fsp->hsfs_vol.lbn_size; /* bytes per logical block */
1090 1090
1091 1091 /* file data size */
1092 1092 filsiz = hp->hs_dirent.ext_size;
1093 1093
1094 1094 /* disk addr for start of file */
1095 1095 bof = LBN_TO_BYTE((offset_t)hp->hs_dirent.ext_lbn, vp->v_vfsp);
1096 1096
1097 1097 /* xarsiz byte must be skipped for data */
1098 1098 xarsiz = hp->hs_dirent.xar_len << fsp->hsfs_vol.lbn_shift;
1099 1099
1100 1100 /* how many logical blocks in an interleave (data+skip) */
1101 1101 chunk_lbn_count = hp->hs_dirent.intlf_sz + hp->hs_dirent.intlf_sk;
1102 1102
1103 1103 if (chunk_lbn_count == 0) {
1104 1104 chunk_lbn_count = 1;
1105 1105 }
1106 1106
1107 1107 /*
1108 1108 * Convert interleaving size into bytes. The zero case
1109 1109 * (no interleaving) optimization is handled as a side-
1110 1110 * effect of the read-ahead logic.
1111 1111 */
1112 1112 if (hp->hs_dirent.intlf_sz == 0) {
1113 1113 chunk_data_bytes = LBN_TO_BYTE(1, vp->v_vfsp);
1114 1114 /*
1115 1115 * Optimization: If our pagesize is a multiple of LBN
1116 1116 * bytes, we can avoid breaking up a page into individual
1117 1117 * lbn-sized requests.
1118 1118 */
1119 1119 if (PAGESIZE % chunk_data_bytes == 0) {
1120 1120 chunk_lbn_count = BYTE_TO_LBN(PAGESIZE, vp->v_vfsp);
1121 1121 chunk_data_bytes = PAGESIZE;
1122 1122 }
1123 1123 } else {
1124 1124 chunk_data_bytes =
1125 1125 LBN_TO_BYTE(hp->hs_dirent.intlf_sz, vp->v_vfsp);
1126 1126 }
1127 1127
1128 1128 reread:
1129 1129 err = 0;
1130 1130 pagefound = 0;
1131 1131 calcdone = 0;
1132 1132
1133 1133 /*
1134 1134 * Do some read-ahead. This mostly saves us a bit of
1135 1135 * system cpu time more than anything else when doing
1136 1136 * sequential reads. At some point, could do the
1137 1137 * read-ahead asynchronously which might gain us something
1138 1138 * on wall time, but it seems unlikely....
1139 1139 *
1140 1140 * We do the easy case here, which is to read through
1141 1141 * the end of the chunk, minus whatever's at the end that
1142 1142 * won't exactly fill a page.
1143 1143 */
1144 1144 if (hp->hs_ra_bytes > 0 && chunk_data_bytes != PAGESIZE) {
1145 1145 which_chunk_lbn = (off + len) / chunk_data_bytes;
1146 1146 extension = ((which_chunk_lbn + 1) * chunk_data_bytes) - off;
1147 1147 extension -= (extension % PAGESIZE);
1148 1148 } else {
1149 1149 extension = roundup(len, PAGESIZE);
1150 1150 }
1151 1151
1152 1152 atomic_inc_64(&fsp->total_pages_requested);
1153 1153
1154 1154 pp = NULL;
1155 1155 again:
1156 1156 /* search for page in buffer */
1157 1157 if ((pagefound = page_exists(vp, off)) == 0) {
1158 1158 /*
1159 1159 * Need to really do disk IO to get the page.
1160 1160 */
1161 1161 if (!calcdone) {
1162 1162 extension += hp->hs_ra_bytes;
1163 1163
1164 + len = (extension != 0) ? extension : PAGESIZE;
1165 +
1164 1166 /*
1165 1167 * Some cd writers don't write sectors that aren't
1166 1168 * used. Also, there's no point in reading sectors
1167 1169 * we'll never look at. So, if we're asked to go
1168 1170 * beyond the end of a file, truncate to the length
1169 1171 * of that file.
1170 1172 *
1171 1173 * Additionally, this behaviour is required by section
1172 1174 * 6.4.5 of ISO 9660:1988(E).
1173 1175 */
1174 - len = MIN(extension ? extension : PAGESIZE,
1175 - filsiz - off);
1176 + if (off < filsiz && off + len > filsiz)
1177 + len = filsiz - off;
1176 1178
1177 - /* A little paranoia. */
1178 - ASSERT(len > 0);
1179 -
1180 1179 /*
1181 1180 * After all that, make sure we're asking for things
1182 - * in units that bdev_strategy() will understand
1183 - * (see bug 4202551).
1181 + * in units that bdev_strategy() will understand.
1184 1182 */
1185 1183 len = roundup(len, DEV_BSIZE);
1186 1184 calcdone = 1;
1187 1185 }
1188 1186
1189 1187 pp = pvn_read_kluster(vp, off, seg, addr, &io_off_tmp,
1190 1188 &io_len_tmp, off, len, 0);
1191 1189
1192 1190 if (pp == NULL) {
1193 1191 /*
1194 1192 * Pressure on memory, roll back readahead
1195 1193 */
1196 1194 hp->hs_num_contig = 0;
1197 1195 hp->hs_ra_bytes = 0;
1198 1196 hp->hs_prev_offset = 0;
1199 1197 goto again;
1200 1198 }
1201 1199
1202 1200 io_off = (uint_t)io_off_tmp;
1203 1201 io_len = (uint_t)io_len_tmp;
1204 1202
1205 1203 /* check for truncation */
1206 1204 /*
1207 1205 * xxx Clean up and return EIO instead?
1208 1206 * xxx Ought to go to u_offset_t for everything, but we
1209 1207 * xxx call lots of things that want uint_t arguments.
1210 1208 */
1211 1209 ASSERT(io_off == io_off_tmp);
1212 1210
1213 1211 /*
1214 1212 * get enough buffers for worst-case scenario
1215 1213 * (i.e., no coalescing possible).
1216 1214 */
1217 1215 bufcnt = (len + secsize - 1) / secsize;
1218 1216 bufs = kmem_zalloc(bufcnt * sizeof (struct buf), KM_SLEEP);
1219 1217 vas = kmem_alloc(bufcnt * sizeof (caddr_t), KM_SLEEP);
1220 1218
1221 1219 /*
1222 1220 		 * Allocate an array of semaphores if we are doing I/O
1223 1221 * scheduling.
1224 1222 */
1225 1223 if (fsp->hqueue != NULL)
1226 1224 fio_done = kmem_alloc(bufcnt * sizeof (ksema_t),
1227 1225 KM_SLEEP);
1228 1226 for (count = 0; count < bufcnt; count++) {
1229 1227 bioinit(&bufs[count]);
1230 1228 bufs[count].b_edev = devvp->v_rdev;
1231 1229 bufs[count].b_dev = cmpdev(devvp->v_rdev);
1232 1230 bufs[count].b_flags = B_NOCACHE|B_BUSY|B_READ;
1233 1231 bufs[count].b_iodone = hsfs_iodone;
1234 1232 bufs[count].b_vp = vp;
1235 1233 bufs[count].b_file = vp;
1236 1234 }
1237 1235
1238 1236 /*
1239 1237 * If our filesize is not an integer multiple of PAGESIZE,
1240 1238 * we zero that part of the last page that's between EOF and
1241 1239 * the PAGESIZE boundary.
1242 1240 */
1243 1241 xlen = io_len & PAGEOFFSET;
1244 1242 if (xlen != 0)
1245 1243 pagezero(pp->p_prev, xlen, PAGESIZE - xlen);
1246 1244
1247 1245 va = NULL;
1248 1246 lastp = NULL;
1249 1247 searchp = pp;
1250 1248 io_end = io_off + io_len;
1251 1249 for (count = 0, byte_offset = io_off;
1252 1250 byte_offset < io_end; count++) {
1253 1251 ASSERT(count < bufcnt);
1254 1252
1255 1253 /* Compute disk address for interleaving. */
1256 1254
1257 1255 /* considered without skips */
1258 1256 which_chunk_lbn = byte_offset / chunk_data_bytes;
1259 1257
1260 1258 /* factor in skips */
1261 1259 offset_lbn = which_chunk_lbn * chunk_lbn_count;
1262 1260
1263 1261 /* convert to physical byte offset for lbn */
1264 1262 offset_bytes = LBN_TO_BYTE(offset_lbn, vp->v_vfsp);
1265 1263
1266 1264 /* don't forget offset into lbn */
1267 1265 offset_extra = byte_offset % chunk_data_bytes;
1268 1266
1269 1267 /* get virtual block number for driver */
1270 1268 driver_block =
1271 1269 lbtodb(bof + xarsiz + offset_bytes + offset_extra);
1272 1270
1273 1271 if (lastp != searchp) {
1274 1272 /* this branch taken first time through loop */
1275 1273 va = vas[count] =
1276 1274 ppmapin(searchp, PROT_WRITE, (caddr_t)-1);
1277 1275 /* ppmapin() guarantees not to return NULL */
1278 1276 } else {
1279 1277 vas[count] = NULL;
1280 1278 }
1281 1279
1282 1280 bufs[count].b_un.b_addr = va + byte_offset % PAGESIZE;
1283 1281 bufs[count].b_offset =
1284 1282 (offset_t)(byte_offset - io_off + off);
1285 1283
1286 1284 /*
1287 1285 * We specifically use the b_lblkno member here
1288 1286 * as even in the 32 bit world driver_block can
1289 1287 * get very large in line with the ISO9660 spec.
1290 1288 */
1291 1289
1292 1290 bufs[count].b_lblkno = driver_block;
1293 1291
1294 1292 remaining_bytes =
1295 1293 ((which_chunk_lbn + 1) * chunk_data_bytes)
1296 1294 - byte_offset;
1297 1295
1298 1296 /*
1299 1297 * remaining_bytes can't be zero, as we derived
1300 1298 * which_chunk_lbn directly from byte_offset.
1301 1299 */
1302 1300 if ((remaining_bytes + byte_offset) < (off + len)) {
1303 1301 /* coalesce-read the rest of the chunk */
1304 1302 bufs[count].b_bcount = remaining_bytes;
1305 1303 } else {
1306 1304 /* get the final bits */
1307 1305 bufs[count].b_bcount = off + len - byte_offset;
1308 1306 }
1309 1307
1310 1308 /*
1311 1309 * It would be nice to do multiple pages'
1312 1310 * worth at once here when the opportunity
1313 1311 * arises, as that has been shown to improve
1314 1312 * our wall time. However, to do that
1315 1313 * requires that we use the pageio subsystem,
1316 1314 * which doesn't mix well with what we're
1317 1315 * already using here. We can't use pageio
1318 1316 * all the time, because that subsystem
1319 1317 * assumes that a page is stored in N
1320 1318 * contiguous blocks on the device.
1321 1319 * Interleaving violates that assumption.
1322 1320 *
1323 1321 * Update: This is now not so big a problem
1324 1322 * because of the I/O scheduler sitting below
1325 1323 * that can re-order and coalesce I/O requests.
1326 1324 */
1327 1325
1328 1326 remainder = PAGESIZE - (byte_offset % PAGESIZE);
1329 1327 if (bufs[count].b_bcount > remainder) {
1330 1328 bufs[count].b_bcount = remainder;
1331 1329 }
1332 1330
1333 1331 bufs[count].b_bufsize = bufs[count].b_bcount;
1334 1332 if (((offset_t)byte_offset + bufs[count].b_bcount) >
1335 1333 HS_MAXFILEOFF) {
1336 1334 break;
1337 1335 }
1338 1336 byte_offset += bufs[count].b_bcount;
1339 1337
1340 1338 if (fsp->hqueue == NULL) {
1341 1339 (void) bdev_strategy(&bufs[count]);
1342 1340
1343 1341 } else {
1344 1342 /*
1345 1343 * We are scheduling I/O so we need to enqueue
1346 1344 * requests rather than calling bdev_strategy
1347 1345 * here. A later invocation of the scheduling
1348 1346 * function will take care of doing the actual
1349 1347 * I/O as it selects requests from the queue as
1350 1348 * per the scheduling logic.
1351 1349 */
1352 1350 struct hio *hsio = kmem_cache_alloc(hio_cache,
1353 1351 KM_SLEEP);
1354 1352
1355 1353 sema_init(&fio_done[count], 0, NULL,
1356 1354 SEMA_DEFAULT, NULL);
1357 1355 hsio->bp = &bufs[count];
1358 1356 hsio->sema = &fio_done[count];
1359 1357 hsio->io_lblkno = bufs[count].b_lblkno;
1360 1358 hsio->nblocks = howmany(hsio->bp->b_bcount,
1361 1359 DEV_BSIZE);
1362 1360
1363 1361 /* used for deadline */
1364 1362 hsio->io_timestamp =
1365 1363 drv_hztousec(ddi_get_lbolt());
1366 1364
1367 1365 /* for I/O coalescing */
1368 1366 hsio->contig_chain = NULL;
1369 1367 hsched_enqueue_io(fsp, hsio, 0);
1370 1368 }
1371 1369
1372 1370 lwp_stat_update(LWP_STAT_INBLK, 1);
1373 1371 lastp = searchp;
1374 1372 if ((remainder - bufs[count].b_bcount) < 1) {
1375 1373 searchp = searchp->p_next;
1376 1374 }
1377 1375 }
1378 1376
1379 1377 bufsused = count;
1380 1378 /* Now wait for everything to come in */
1381 1379 if (fsp->hqueue == NULL) {
1382 1380 for (count = 0; count < bufsused; count++) {
1383 1381 if (err == 0) {
1384 1382 err = biowait(&bufs[count]);
1385 1383 } else
1386 1384 (void) biowait(&bufs[count]);
1387 1385 }
1388 1386 } else {
1389 1387 for (count = 0; count < bufsused; count++) {
1390 1388 struct buf *wbuf;
1391 1389
1392 1390 /*
1393 1391 * Invoke scheduling function till our buf
1394 1392 * is processed. In doing this it might
1395 1393 * process bufs enqueued by other threads
1396 1394 * which is good.
1397 1395 */
1398 1396 wbuf = &bufs[count];
1399 1397 DTRACE_PROBE1(hsfs_io_wait, struct buf *, wbuf);
1400 1398 while (sema_tryp(&fio_done[count]) == 0) {
1401 1399 /*
1402 1400 * hsched_invoke_strategy will return 1
1403 1401 * if the I/O queue is empty. This means
1404 1402 * that there is another thread who has
1405 1403 * issued our buf and is waiting. So we
1406 1404 * just block instead of spinning.
1407 1405 */
1408 1406 if (hsched_invoke_strategy(fsp)) {
1409 1407 sema_p(&fio_done[count]);
1410 1408 break;
1411 1409 }
1412 1410 }
1413 1411 sema_destroy(&fio_done[count]);
1414 1412 DTRACE_PROBE1(hsfs_io_done, struct buf *, wbuf);
1415 1413
1416 1414 if (err == 0) {
1417 1415 err = geterror(wbuf);
1418 1416 }
1419 1417 }
1420 1418 kmem_free(fio_done, bufcnt * sizeof (ksema_t));
1421 1419 }
1422 1420
1423 1421 /* Don't leak resources */
1424 1422 for (count = 0; count < bufcnt; count++) {
1425 1423 biofini(&bufs[count]);
1426 1424 if (count < bufsused && vas[count] != NULL) {
1427 1425 ppmapout(vas[count]);
1428 1426 }
1429 1427 }
1430 1428
1431 1429 kmem_free(vas, bufcnt * sizeof (caddr_t));
1432 1430 kmem_free(bufs, bufcnt * sizeof (struct buf));
1433 1431 }
1434 1432
1435 1433 if (err) {
1436 1434 pvn_read_done(pp, B_ERROR);
1437 1435 return (err);
1438 1436 }
1439 1437
1440 1438 /*
1441 1439 * Lock the requested page, and the one after it if possible.
1442 1440 * Don't bother if our caller hasn't given us a place to stash
1443 1441 * the page pointers, since otherwise we'd lock pages that would
1444 1442 * never get unlocked.
1445 1443 */
1446 1444 if (pagefound) {
1447 1445 int index;
1448 1446 ulong_t soff;
1449 1447
1450 1448 /*
1451 1449 * Make sure it's in memory before we say it's here.
1452 1450 */
1453 1451 if ((pp = page_lookup(vp, off, SE_SHARED)) == NULL) {
1454 1452 hsfs_lostpage++;
1455 1453 goto reread;
1456 1454 }
1457 1455
1458 1456 pl[0] = pp;
1459 1457 index = 1;
1460 1458 atomic_inc_64(&fsp->cache_read_pages);
1461 1459
1462 1460 /*
1463 1461 * Try to lock the next page, if it exists, without
1464 1462 * blocking.
1465 1463 */
1466 1464 plsz -= PAGESIZE;
1467 1465 /* LINTED (plsz is unsigned) */
1468 1466 for (soff = off + PAGESIZE; plsz > 0;
1469 1467 soff += PAGESIZE, plsz -= PAGESIZE) {
1470 1468 pp = page_lookup_nowait(vp, (u_offset_t)soff,
1471 1469 SE_SHARED);
1472 1470 if (pp == NULL)
1473 1471 break;
1474 1472 pl[index++] = pp;
1475 1473 }
1476 1474 pl[index] = NULL;
1477 1475
1478 1476 /*
1479 1477 * Schedule a semi-asynchronous readahead if we are
1480 1478 * accessing the last cached page for the current
1481 1479 * file.
1482 1480 *
1483 1481 * Doing this here means that readaheads will be
1484 1482 * issued only if cache-hits occur. This is an advantage
1485 1483 * since cache-hits would mean that readahead is giving
1486 1484 * the desired benefit. If cache-hits do not occur there
1487 1485 * is no point in reading ahead of time - the system
1488 1486 * is loaded anyway.
1489 1487 */
1490 1488 if (fsp->hqueue != NULL &&
1491 1489 hp->hs_prev_offset - off == PAGESIZE &&
1492 1490 hp->hs_prev_offset < filsiz &&
1493 1491 hp->hs_ra_bytes > 0 &&
1494 1492 !page_exists(vp, hp->hs_prev_offset)) {
1495 1493 (void) hsfs_getpage_ra(vp, hp->hs_prev_offset, seg,
1496 1494 addr + PAGESIZE, hp, fsp, xarsiz, bof,
1497 1495 chunk_lbn_count, chunk_data_bytes);
1498 1496 }
1499 1497
1500 1498 return (0);
1501 1499 }
1502 1500
1503 1501 if (pp != NULL) {
1504 1502 pvn_plist_init(pp, pl, plsz, off, io_len, rw);
1505 1503 }
1506 1504
1507 1505 return (err);
1508 1506 }
1509 1507
1510 1508 /*ARGSUSED*/
1511 1509 static int
1512 1510 hsfs_getpage(struct vnode *vp, offset_t off, size_t len, uint_t *protp,
1513 1511 struct page *pl[], size_t plsz, struct seg *seg, caddr_t addr,
1514 1512 enum seg_rw rw, struct cred *cred, caller_context_t *ct)
1515 1513 {
1516 1514 uint_t filsiz;
1517 1515 struct hsfs *fsp;
1518 1516 struct hsnode *hp;
1519 1517
1520 1518 fsp = VFS_TO_HSFS(vp->v_vfsp);
1521 1519 hp = VTOH(vp);
1522 1520
1523 1521 /* does not support write */
1524 1522 if (rw == S_WRITE) {
1525 1523 return (EROFS);
1526 1524 }
1527 1525
1528 1526 if (vp->v_flag & VNOMAP) {
1529 1527 return (ENOSYS);
1530 1528 }
1531 1529
1532 1530 ASSERT(off <= HS_MAXFILEOFF);
1533 1531
1534 1532 /*
1535 1533 * Determine file data size for EOF check.
1536 1534 */
1537 1535 filsiz = hp->hs_dirent.ext_size;
1538 1536 if ((off + len) > (offset_t)(filsiz + PAGEOFFSET) && seg != segkmap)
1539 1537 return (EFAULT); /* beyond EOF */
1540 1538
1541 1539 /*
1542 1540 * Async Read-ahead computation.
1543 1541 * This attempts to detect sequential access pattern and
1544 1542 * enables reading extra pages ahead of time.
1545 1543 */
1546 1544 if (fsp->hqueue != NULL) {
1547 1545 /*
1548 1546 * This check for sequential access also takes into
1549 1547 * account segmap weirdness when reading in chunks
1550 1548 * less than the segmap size of 8K.
1551 1549 */
1552 1550 if (hp->hs_prev_offset == off || (off <
1553 1551 hp->hs_prev_offset && off + MAX(len, PAGESIZE)
1554 1552 >= hp->hs_prev_offset)) {
1555 1553 if (hp->hs_num_contig <
1556 1554 (seq_contig_requests - 1)) {
1557 1555 hp->hs_num_contig++;
1558 1556
1559 1557 } else {
1560 1558 /*
1561 1559 * We increase readahead quantum till
1562 1560 * a predefined max. max_readahead_bytes
1563 1561 * is a multiple of PAGESIZE.
1564 1562 */
1565 1563 if (hp->hs_ra_bytes <
1566 1564 fsp->hqueue->max_ra_bytes) {
1567 1565 hp->hs_ra_bytes += PAGESIZE;
1568 1566 }
1569 1567 }
1570 1568 } else {
1571 1569 /*
1572 1570 * Not contiguous so reduce read ahead counters.
1573 1571 */
1574 1572 if (hp->hs_ra_bytes > 0)
1575 1573 hp->hs_ra_bytes -= PAGESIZE;
1576 1574
1577 1575 if (hp->hs_ra_bytes <= 0) {
1578 1576 hp->hs_ra_bytes = 0;
1579 1577 if (hp->hs_num_contig > 0)
1580 1578 hp->hs_num_contig--;
1581 1579 }
1582 1580 }
1583 1581 /*
1584 1582 		 * Length must be rounded up to a page boundary,
1585 1583 		 * since we read in units of pages.
1586 1584 */
1587 1585 hp->hs_prev_offset = off + roundup(len, PAGESIZE);
1588 1586 DTRACE_PROBE1(hsfs_compute_ra, struct hsnode *, hp);
1589 1587 }
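	/*
	 * Example ramp (illustrative): with seq_contig_requests = 2,
	 * the second back-to-back sequential request starts growing
	 * hs_ra_bytes by PAGESIZE per request, capped at
	 * fsp->hqueue->max_ra_bytes; a non-contiguous request
	 * shrinks it by PAGESIZE again.
	 */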
1590 1588 if (protp != NULL)
1591 1589 *protp = PROT_ALL;
1592 1590
1593 1591 return (pvn_getpages(hsfs_getapage, vp, off, len, protp, pl, plsz,
1594 1592 seg, addr, rw, cred));
1595 1593 }
1596 1594
1597 1595
1598 1596
1599 1597 /*
1600 1598 * This function should never be called. We need to have it to pass
1601 1599 * it as an argument to other functions.
1602 1600 */
1603 1601 /*ARGSUSED*/
1604 1602 int
1605 1603 hsfs_putapage(vnode_t *vp, page_t *pp, u_offset_t *offp, size_t *lenp,
1606 1604 int flags, cred_t *cr)
1607 1605 {
1608 1606 /* should never happen - just destroy it */
1609 1607 cmn_err(CE_NOTE, "hsfs_putapage: dirty HSFS page");
1610 1608 pvn_write_done(pp, B_ERROR | B_WRITE | B_INVAL | B_FORCE | flags);
1611 1609 return (0);
1612 1610 }
1613 1611
1614 1612
1615 1613 /*
1616 1614 * The only flags we support are B_INVAL, B_FREE and B_DONTNEED.
1617 1615 * B_INVAL is set by:
1618 1616 *
1619 1617 * 1) the MC_SYNC command of memcntl(2) to support the MS_INVALIDATE flag.
1620 1618 * 2) the MC_ADVISE command of memcntl(2) with the MADV_DONTNEED advice
1621 1619 * which translates to an MC_SYNC with the MS_INVALIDATE flag.
1622 1620 *
1623 1621 * The B_FREE (as well as the B_DONTNEED) flag is set when the
1624 1622 * MADV_SEQUENTIAL advice has been used. VOP_PUTPAGE is invoked
1625 1623 * from SEGVN to release pages behind a pagefault.
1626 1624 */
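/*
 * For example, a process with a mapping of an hsfs file can drive
 * this path from userland with
 *
 *	(void) msync(addr, len, MS_INVALIDATE);
 *
 * which reaches us as VOP_PUTPAGE with B_INVAL set.
 */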
1627 1625 /*ARGSUSED*/
1628 1626 static int
1629 1627 hsfs_putpage(struct vnode *vp, offset_t off, size_t len, int flags,
1630 1628 struct cred *cr, caller_context_t *ct)
1631 1629 {
1632 1630 int error = 0;
1633 1631
1634 1632 if (vp->v_count == 0) {
1635 1633 panic("hsfs_putpage: bad v_count");
1636 1634 /*NOTREACHED*/
1637 1635 }
1638 1636
1639 1637 if (vp->v_flag & VNOMAP)
1640 1638 return (ENOSYS);
1641 1639
1642 1640 ASSERT(off <= HS_MAXFILEOFF);
1643 1641
1644 1642 if (!vn_has_cached_data(vp)) /* no pages mapped */
1645 1643 return (0);
1646 1644
1647 1645 if (len == 0) { /* from 'off' to EOF */
1648 1646 error = pvn_vplist_dirty(vp, off, hsfs_putapage, flags, cr);
1649 1647 } else {
1650 1648 offset_t end_off = off + len;
1651 1649 offset_t file_size = VTOH(vp)->hs_dirent.ext_size;
1652 1650 offset_t io_off;
1653 1651
1654 1652 file_size = (file_size + PAGESIZE - 1) & PAGEMASK;
1655 1653 if (end_off > file_size)
1656 1654 end_off = file_size;
1657 1655
1658 1656 for (io_off = off; io_off < end_off; io_off += PAGESIZE) {
1659 1657 page_t *pp;
1660 1658
1661 1659 /*
1662 1660 * We insist on getting the page only if we are
1663 1661 * about to invalidate, free or write it and
1664 1662 * the B_ASYNC flag is not set.
1665 1663 */
1666 1664 if ((flags & B_INVAL) || ((flags & B_ASYNC) == 0)) {
1667 1665 pp = page_lookup(vp, io_off,
1668 1666 (flags & (B_INVAL | B_FREE)) ?
1669 1667 SE_EXCL : SE_SHARED);
1670 1668 } else {
1671 1669 pp = page_lookup_nowait(vp, io_off,
1672 1670 (flags & B_FREE) ? SE_EXCL : SE_SHARED);
1673 1671 }
1674 1672
1675 1673 if (pp == NULL)
1676 1674 continue;
1677 1675
1678 1676 /*
1679 1677 * Normally pvn_getdirty() should return 0, which
1680 1678 * impies that it has done the job for us.
1681 1679 * The shouldn't-happen scenario is when it returns 1.
1682 1680 * This means that the page has been modified and
1683 1681 * needs to be put back.
1684 1682 * Since we can't write on a CD, we fake a failed
1685 1683 * I/O and force pvn_write_done() to destroy the page.
1686 1684 */
1687 1685 if (pvn_getdirty(pp, flags) == 1) {
1688 1686 cmn_err(CE_NOTE,
1689 1687 "hsfs_putpage: dirty HSFS page");
1690 1688 pvn_write_done(pp, flags |
1691 1689 B_ERROR | B_WRITE | B_INVAL | B_FORCE);
1692 1690 }
1693 1691 }
1694 1692 }
1695 1693 return (error);
1696 1694 }
1697 1695
1698 1696
1699 1697 /*ARGSUSED*/
1700 1698 static int
1701 1699 hsfs_map(struct vnode *vp, offset_t off, struct as *as, caddr_t *addrp,
1702 1700 size_t len, uchar_t prot, uchar_t maxprot, uint_t flags, struct cred *cred,
1703 1701 caller_context_t *ct)
1704 1702 {
1705 1703 struct segvn_crargs vn_a;
1706 1704 int error;
1707 1705
1708 1706 /* VFS_RECORD(vp->v_vfsp, VS_MAP, VS_CALL); */
1709 1707
1710 1708 if (vp->v_flag & VNOMAP)
1711 1709 return (ENOSYS);
1712 1710
1713 1711 if ((prot & PROT_WRITE) && (flags & MAP_SHARED))
1714 1712 return (ENOSYS);
1715 1713
1716 1714 if (off > HS_MAXFILEOFF || off < 0 ||
1717 1715 (off + len) < 0 || (off + len) > HS_MAXFILEOFF)
1718 1716 return (ENXIO);
1719 1717
1720 1718 if (vp->v_type != VREG) {
1721 1719 return (ENODEV);
1722 1720 }
1723 1721
1724 1722 /*
1725 1723 * If file is being locked, disallow mapping.
1726 1724 */
1727 1725 if (vn_has_mandatory_locks(vp, VTOH(vp)->hs_dirent.mode))
1728 1726 return (EAGAIN);
1729 1727
1730 1728 as_rangelock(as);
1731 1729 error = choose_addr(as, addrp, len, off, ADDR_VACALIGN, flags);
1732 1730 if (error != 0) {
1733 1731 as_rangeunlock(as);
1734 1732 return (error);
1735 1733 }
1736 1734
1737 1735 vn_a.vp = vp;
1738 1736 vn_a.offset = off;
1739 1737 vn_a.type = flags & MAP_TYPE;
1740 1738 vn_a.prot = prot;
1741 1739 vn_a.maxprot = maxprot;
1742 1740 vn_a.flags = flags & ~MAP_TYPE;
1743 1741 vn_a.cred = cred;
1744 1742 vn_a.amp = NULL;
1745 1743 vn_a.szc = 0;
1746 1744 vn_a.lgrp_mem_policy_flags = 0;
1747 1745
1748 1746 error = as_map(as, *addrp, len, segvn_create, &vn_a);
1749 1747 as_rangeunlock(as);
1750 1748 return (error);
1751 1749 }
1752 1750
1753 1751 /* ARGSUSED */
1754 1752 static int
1755 1753 hsfs_addmap(struct vnode *vp, offset_t off, struct as *as, caddr_t addr,
1756 1754 size_t len, uchar_t prot, uchar_t maxprot, uint_t flags, struct cred *cr,
1757 1755 caller_context_t *ct)
1758 1756 {
1759 1757 struct hsnode *hp;
1760 1758
1761 1759 if (vp->v_flag & VNOMAP)
1762 1760 return (ENOSYS);
1763 1761
1764 1762 hp = VTOH(vp);
1765 1763 mutex_enter(&hp->hs_contents_lock);
1766 1764 hp->hs_mapcnt += btopr(len);
1767 1765 mutex_exit(&hp->hs_contents_lock);
1768 1766 return (0);
1769 1767 }
1770 1768
1771 1769 /*ARGSUSED*/
1772 1770 static int
1773 1771 hsfs_delmap(struct vnode *vp, offset_t off, struct as *as, caddr_t addr,
1774 1772 size_t len, uint_t prot, uint_t maxprot, uint_t flags, struct cred *cr,
1775 1773 caller_context_t *ct)
1776 1774 {
1777 1775 struct hsnode *hp;
1778 1776
1779 1777 if (vp->v_flag & VNOMAP)
1780 1778 return (ENOSYS);
1781 1779
1782 1780 hp = VTOH(vp);
1783 1781 mutex_enter(&hp->hs_contents_lock);
1784 1782 hp->hs_mapcnt -= btopr(len); /* Count released mappings */
1785 1783 ASSERT(hp->hs_mapcnt >= 0);
1786 1784 mutex_exit(&hp->hs_contents_lock);
1787 1785 return (0);
1788 1786 }
1789 1787
1790 1788 /* ARGSUSED */
1791 1789 static int
1792 1790 hsfs_seek(struct vnode *vp, offset_t ooff, offset_t *noffp,
1793 1791 caller_context_t *ct)
1794 1792 {
1795 1793 return ((*noffp < 0 || *noffp > MAXOFFSET_T) ? EINVAL : 0);
1796 1794 }
1797 1795
1798 1796 /* ARGSUSED */
1799 1797 static int
1800 1798 hsfs_frlock(struct vnode *vp, int cmd, struct flock64 *bfp, int flag,
1801 1799 offset_t offset, struct flk_callback *flk_cbp, cred_t *cr,
1802 1800 caller_context_t *ct)
1803 1801 {
1804 1802 struct hsnode *hp = VTOH(vp);
1805 1803
1806 1804 /*
1807 1805 * If the file is being mapped, disallow fs_frlock.
1808 1806 * We are not holding the hs_contents_lock while checking
1809 1807 * hs_mapcnt because the current locking strategy drops all
1810 1808 * locks before calling fs_frlock.
1811 1809 * So, hs_mapcnt could change before we enter fs_frlock making
1812 1810 * it meaningless to have held hs_contents_lock in the first place.
1813 1811 */
1814 1812 if (hp->hs_mapcnt > 0 && MANDLOCK(vp, hp->hs_dirent.mode))
1815 1813 return (EAGAIN);
1816 1814
1817 1815 return (fs_frlock(vp, cmd, bfp, flag, offset, flk_cbp, cr, ct));
1818 1816 }
1819 1817
1820 1818 static int
1821 1819 hsched_deadline_compare(const void *x1, const void *x2)
1822 1820 {
1823 1821 const struct hio *h1 = x1;
1824 1822 const struct hio *h2 = x2;
1825 1823
1826 1824 if (h1->io_timestamp < h2->io_timestamp)
1827 1825 return (-1);
1828 1826 if (h1->io_timestamp > h2->io_timestamp)
1829 1827 return (1);
1830 1828
1831 1829 if (h1->io_lblkno < h2->io_lblkno)
1832 1830 return (-1);
1833 1831 if (h1->io_lblkno > h2->io_lblkno)
1834 1832 return (1);
1835 1833
1836 1834 if (h1 < h2)
1837 1835 return (-1);
1838 1836 if (h1 > h2)
1839 1837 return (1);
1840 1838
1841 1839 return (0);
1842 1840 }
1843 1841
1844 1842 static int
1845 1843 hsched_offset_compare(const void *x1, const void *x2)
1846 1844 {
1847 1845 const struct hio *h1 = x1;
1848 1846 const struct hio *h2 = x2;
1849 1847
1850 1848 if (h1->io_lblkno < h2->io_lblkno)
1851 1849 return (-1);
1852 1850 if (h1->io_lblkno > h2->io_lblkno)
1853 1851 return (1);
1854 1852
1855 1853 if (h1 < h2)
1856 1854 return (-1);
1857 1855 if (h1 > h2)
1858 1856 return (1);
1859 1857
1860 1858 return (0);
1861 1859 }
1862 1860
1863 1861 void
1864 1862 hsched_init_caches(void)
1865 1863 {
1866 1864 hio_cache = kmem_cache_create("hsfs_hio_cache",
1867 1865 sizeof (struct hio), 0, NULL,
1868 1866 NULL, NULL, NULL, NULL, 0);
1869 1867
1870 1868 hio_info_cache = kmem_cache_create("hsfs_hio_info_cache",
1871 1869 sizeof (struct hio_info), 0, NULL,
1872 1870 NULL, NULL, NULL, NULL, 0);
1873 1871 }
1874 1872
1875 1873 void
1876 1874 hsched_fini_caches(void)
1877 1875 {
1878 1876 kmem_cache_destroy(hio_cache);
1879 1877 kmem_cache_destroy(hio_info_cache);
1880 1878 }
1881 1879
1882 1880 /*
1883 1881 * Initialize I/O scheduling structures. This is called via hsfs_mount
1884 1882 */
1885 1883 void
1886 1884 hsched_init(struct hsfs *fsp, int fsid, struct modlinkage *modlinkage)
1887 1885 {
1888 1886 struct hsfs_queue *hqueue = fsp->hqueue;
1889 1887 struct vnode *vp = fsp->hsfs_devvp;
1890 1888
1891 1889 /* TaskQ name of the form: hsched_task_ + stringof(int) */
1892 1890 char namebuf[23];
1893 1891 int error, err;
1894 1892 struct dk_cinfo info;
1895 1893 ldi_handle_t lh;
1896 1894 ldi_ident_t li;
1897 1895
1898 1896 /*
1899 1897 * Default maxtransfer = 16k chunk
1900 1898 */
1901 1899 hqueue->dev_maxtransfer = 16384;
1902 1900
1903 1901 /*
1904 1902 * Try to fetch the maximum device transfer size. This is used to
1905 1903 * ensure that a coalesced block does not exceed the maxtransfer.
1906 1904 */
1907 1905 err = ldi_ident_from_mod(modlinkage, &li);
1908 1906 if (err) {
1909 1907 cmn_err(CE_NOTE, "hsched_init: Querying device failed");
1910 1908 cmn_err(CE_NOTE, "hsched_init: ldi_ident_from_mod err=%d\n",
1911 1909 err);
1912 1910 goto set_ra;
1913 1911 }
1914 1912
1915 1913 err = ldi_open_by_dev(&(vp->v_rdev), OTYP_CHR, FREAD, CRED(), &lh, li);
1916 1914 ldi_ident_release(li);
1917 1915 if (err) {
1918 1916 cmn_err(CE_NOTE, "hsched_init: Querying device failed");
1919 1917 cmn_err(CE_NOTE, "hsched_init: ldi_open err=%d\n", err);
1920 1918 goto set_ra;
1921 1919 }
1922 1920
1923 1921 error = ldi_ioctl(lh, DKIOCINFO, (intptr_t)&info, FKIOCTL,
1924 1922 CRED(), &err);
1925 1923 err = ldi_close(lh, FREAD, CRED());
1926 1924 if (err) {
1927 1925 cmn_err(CE_NOTE, "hsched_init: Querying device failed");
1928 1926 cmn_err(CE_NOTE, "hsched_init: ldi_close err=%d\n", err);
1929 1927 }
1930 1928
1931 1929 if (error == 0) {
1932 1930 hqueue->dev_maxtransfer = ldbtob(info.dki_maxtransfer);
1933 1931 }
1934 1932
1935 1933 set_ra:
1936 1934 /*
1937 1935 * Max size of data to read ahead for sequential access pattern.
1938 1936 * Conservative to avoid letting the underlying CD drive to spin
1939 1937 * down, in case the application is reading slowly.
1940 1938 * We read ahead upto a max of 4 pages.
1941 1939 */
1942 1940 hqueue->max_ra_bytes = PAGESIZE * 8;
1943 1941
1944 1942 mutex_init(&(hqueue->hsfs_queue_lock), NULL, MUTEX_DEFAULT, NULL);
1945 1943 mutex_init(&(hqueue->strategy_lock), NULL, MUTEX_DEFAULT, NULL);
1946 1944 avl_create(&(hqueue->read_tree), hsched_offset_compare,
1947 1945 sizeof (struct hio), offsetof(struct hio, io_offset_node));
1948 1946 avl_create(&(hqueue->deadline_tree), hsched_deadline_compare,
1949 1947 sizeof (struct hio), offsetof(struct hio, io_deadline_node));
1950 1948
1951 1949 (void) snprintf(namebuf, sizeof (namebuf), "hsched_task_%d", fsid);
1952 1950 hqueue->ra_task = taskq_create(namebuf, hsfs_taskq_nthreads,
1953 1951 minclsyspri + 2, 1, 104857600 / PAGESIZE, TASKQ_DYNAMIC);
1954 1952
1955 1953 hqueue->next = NULL;
1956 1954 hqueue->nbuf = kmem_zalloc(sizeof (struct buf), KM_SLEEP);
1957 1955 }
1958 1956
1959 1957 void
1960 1958 hsched_fini(struct hsfs_queue *hqueue)
1961 1959 {
1962 1960 if (hqueue != NULL) {
1963 1961 /*
1964 1962 * Remove the sentinel if there was one.
1965 1963 */
1966 1964 if (hqueue->next != NULL) {
1967 1965 avl_remove(&hqueue->read_tree, hqueue->next);
1968 1966 kmem_cache_free(hio_cache, hqueue->next);
1969 1967 }
1970 1968 avl_destroy(&(hqueue->read_tree));
1971 1969 avl_destroy(&(hqueue->deadline_tree));
1972 1970 mutex_destroy(&(hqueue->hsfs_queue_lock));
1973 1971 mutex_destroy(&(hqueue->strategy_lock));
1974 1972
1975 1973 /*
1976 1974 * If there are any existing readahead threads running
1977 1975 * taskq_destroy will wait for them to finish.
1978 1976 */
1979 1977 taskq_destroy(hqueue->ra_task);
1980 1978 kmem_free(hqueue->nbuf, sizeof (struct buf));
1981 1979 }
1982 1980 }
1983 1981
1984 1982 /*
1985 1983 * Determine if two I/O requests are adjacent to each other so
1986 1984 * that they can coalesced.
1987 1985 */
1988 1986 #define IS_ADJACENT(io, nio) \
1989 1987 (((io)->io_lblkno + (io)->nblocks == (nio)->io_lblkno) && \
1990 1988 (io)->bp->b_edev == (nio)->bp->b_edev)
1991 1989
1992 1990 /*
1993 1991 * This performs the actual I/O scheduling logic. We use the Circular
1994 1992 * Look algorithm here. Sort the I/O requests in ascending order of
1995 1993 * logical block number and process them starting with the lowest
1996 1994 * numbered block and progressing towards higher block numbers in the
1997 1995 * queue. Once there are no more higher numbered blocks, start again
1998 1996 * with the lowest one. This is good for CD/DVD as you keep moving
1999 1997 * the head in one direction along the outward spiral track and avoid
2000 1998 * too many seeks as much as possible. The re-ordering also allows
2001 1999 * us to coalesce adjacent requests into one larger request.
2002 2000 * This is thus essentially a 1-way Elevator with front merging.
2003 2001 *
2004 2002 * In addition each read request here has a deadline and will be
2005 2003 * processed out of turn if the deadline (500ms) expires.
2006 2004 *
2007 2005 * This function is necessarily serialized via hqueue->strategy_lock.
2008 2006 * This function sits just below hsfs_getapage and processes all read
2009 2007 * requests orginating from that function.
2010 2008 */
2011 2009 int
2012 2010 hsched_invoke_strategy(struct hsfs *fsp)
2013 2011 {
2014 2012 struct hsfs_queue *hqueue;
2015 2013 struct buf *nbuf;
2016 2014 struct hio *fio, *nio, *tio, *prev, *last;
2017 2015 size_t bsize, soffset, offset, data;
2018 2016 int bioret, bufcount;
2019 2017 struct vnode *fvp;
2020 2018 ksema_t *io_done;
2021 2019 caddr_t iodata;
2022 2020
2023 2021 hqueue = fsp->hqueue;
2024 2022 mutex_enter(&hqueue->strategy_lock);
2025 2023 mutex_enter(&hqueue->hsfs_queue_lock);
2026 2024
2027 2025 /*
2028 2026 * Check for Deadline expiration first
2029 2027 */
2030 2028 fio = avl_first(&hqueue->deadline_tree);
2031 2029
2032 2030 /*
2033 2031 * Paranoid check for empty I/O queue. Both deadline
2034 2032 * and read trees contain same data sorted in different
2035 2033 * ways. So empty deadline tree = empty read tree.
2036 2034 */
2037 2035 if (fio == NULL) {
2038 2036 /*
2039 2037 * Remove the sentinel if there was one.
2040 2038 */
2041 2039 if (hqueue->next != NULL) {
2042 2040 avl_remove(&hqueue->read_tree, hqueue->next);
2043 2041 kmem_cache_free(hio_cache, hqueue->next);
2044 2042 hqueue->next = NULL;
2045 2043 }
2046 2044 mutex_exit(&hqueue->hsfs_queue_lock);
2047 2045 mutex_exit(&hqueue->strategy_lock);
2048 2046 return (1);
2049 2047 }
2050 2048
2051 2049 if (drv_hztousec(ddi_get_lbolt()) - fio->io_timestamp
2052 2050 < HSFS_READ_DEADLINE) {
2053 2051 /*
2054 2052 * Apply standard scheduling logic. This uses the
2055 2053 * C-LOOK approach. Process I/O requests in ascending
2056 2054 * order of logical block address till no subsequent
2057 2055 * higher numbered block request remains. Then start
2058 2056 * again from the lowest numbered block in the queue.
2059 2057 *
2060 2058 * We do this cheaply here by means of a sentinel.
2061 2059 * The last processed I/O structure from the previous
2062 2060 * invocation of this func, is left dangling in the
2063 2061 * read_tree so that we can easily scan to the next
2064 2062 * higher numbered request and remove the sentinel.
2065 2063 */
2066 2064 fio = NULL;
2067 2065 if (hqueue->next != NULL) {
2068 2066 fio = AVL_NEXT(&hqueue->read_tree, hqueue->next);
2069 2067 avl_remove(&hqueue->read_tree, hqueue->next);
2070 2068 kmem_cache_free(hio_cache, hqueue->next);
2071 2069 hqueue->next = NULL;
2072 2070 }
2073 2071 if (fio == NULL) {
2074 2072 fio = avl_first(&hqueue->read_tree);
2075 2073 }
2076 2074 } else if (hqueue->next != NULL) {
2077 2075 DTRACE_PROBE1(hsfs_deadline_expiry, struct hio *, fio);
2078 2076
2079 2077 avl_remove(&hqueue->read_tree, hqueue->next);
2080 2078 kmem_cache_free(hio_cache, hqueue->next);
2081 2079 hqueue->next = NULL;
2082 2080 }
2083 2081
2084 2082 /*
2085 2083 * In addition we try to coalesce contiguous
2086 2084 * requests into one bigger request.
2087 2085 */
2088 2086 bufcount = 1;
2089 2087 bsize = ldbtob(fio->nblocks);
2090 2088 fvp = fio->bp->b_file;
2091 2089 nio = AVL_NEXT(&hqueue->read_tree, fio);
2092 2090 tio = fio;
2093 2091 while (nio != NULL && IS_ADJACENT(tio, nio) &&
2094 2092 bsize < hqueue->dev_maxtransfer) {
2095 2093 avl_remove(&hqueue->deadline_tree, tio);
2096 2094 avl_remove(&hqueue->read_tree, tio);
2097 2095 tio->contig_chain = nio;
2098 2096 bsize += ldbtob(nio->nblocks);
2099 2097 prev = tio;
2100 2098 tio = nio;
2101 2099
2102 2100 /*
2103 2101 * This check is required to detect the case where
2104 2102 * we are merging adjacent buffers belonging to
2105 2103 * different files. fvp is used to set the b_file
2106 2104 * parameter in the coalesced buf. b_file is used
2107 2105 * by DTrace so we do not want DTrace to accrue
2108 2106 * requests to two different files to any one file.
2109 2107 */
2110 2108 if (fvp && tio->bp->b_file != fvp) {
2111 2109 fvp = NULL;
2112 2110 }
2113 2111
2114 2112 nio = AVL_NEXT(&hqueue->read_tree, nio);
2115 2113 bufcount++;
2116 2114 }
2117 2115
2118 2116 /*
2119 2117 * tio is not removed from the read_tree as it serves as a sentinel
2120 2118 * to cheaply allow us to scan to the next higher numbered I/O
2121 2119 * request.
2122 2120 */
2123 2121 hqueue->next = tio;
2124 2122 avl_remove(&hqueue->deadline_tree, tio);
2125 2123 mutex_exit(&hqueue->hsfs_queue_lock);
2126 2124 DTRACE_PROBE3(hsfs_io_dequeued, struct hio *, fio, int, bufcount,
2127 2125 size_t, bsize);
2128 2126
2129 2127 /*
2130 2128 * The benefit of coalescing occurs if the the savings in I/O outweighs
2131 2129 * the cost of doing the additional work below.
2132 2130 * It was observed that coalescing 2 buffers results in diminishing
2133 2131 * returns, so we do coalescing if we have >2 adjacent bufs.
2134 2132 */
2135 2133 if (bufcount > hsched_coalesce_min) {
2136 2134 /*
2137 2135 * We have coalesced blocks. First allocate mem and buf for
2138 2136 * the entire coalesced chunk.
2139 2137 * Since we are guaranteed single-threaded here we pre-allocate
2140 2138 * one buf at mount time and that is re-used every time. This
2141 2139 * is a synthesized buf structure that uses kmem_alloced chunk.
2142 2140 * Not quite a normal buf attached to pages.
2143 2141 */
2144 2142 fsp->coalesced_bytes += bsize;
2145 2143 nbuf = hqueue->nbuf;
2146 2144 bioinit(nbuf);
2147 2145 nbuf->b_edev = fio->bp->b_edev;
2148 2146 nbuf->b_dev = fio->bp->b_dev;
2149 2147 nbuf->b_flags = fio->bp->b_flags;
2150 2148 nbuf->b_iodone = fio->bp->b_iodone;
2151 2149 iodata = kmem_alloc(bsize, KM_SLEEP);
2152 2150 nbuf->b_un.b_addr = iodata;
2153 2151 nbuf->b_lblkno = fio->bp->b_lblkno;
2154 2152 nbuf->b_vp = fvp;
2155 2153 nbuf->b_file = fvp;
2156 2154 nbuf->b_bcount = bsize;
2157 2155 nbuf->b_bufsize = bsize;
2158 2156
2159 2157 DTRACE_PROBE3(hsfs_coalesced_io_start, struct hio *, fio, int,
2160 2158 bufcount, size_t, bsize);
2161 2159
2162 2160 /*
2163 2161 * Perform I/O for the coalesced block.
2164 2162 */
2165 2163 (void) bdev_strategy(nbuf);
2166 2164
2167 2165 /*
2168 2166 * Duplicate the last IO node to leave the sentinel alone.
2169 2167 * The sentinel is freed in the next invocation of this
2170 2168 * function.
2171 2169 */
2172 2170 prev->contig_chain = kmem_cache_alloc(hio_cache, KM_SLEEP);
2173 2171 prev->contig_chain->bp = tio->bp;
2174 2172 prev->contig_chain->sema = tio->sema;
2175 2173 tio = prev->contig_chain;
2176 2174 tio->contig_chain = NULL;
2177 2175 soffset = ldbtob(fio->bp->b_lblkno);
2178 2176 nio = fio;
2179 2177
2180 2178 bioret = biowait(nbuf);
2181 2179 data = bsize - nbuf->b_resid;
2182 2180 biofini(nbuf);
2183 2181 mutex_exit(&hqueue->strategy_lock);
2184 2182
2185 2183 /*
2186 2184 * We use the b_resid parameter to detect how much
2187 2185 * data was succesfully transferred. We will signal
2188 2186 * a success to all the fully retrieved actual bufs
2189 2187 * before coalescing, rest is signaled as error,
2190 2188 * if any.
2191 2189 */
2192 2190 tio = nio;
2193 2191 DTRACE_PROBE3(hsfs_coalesced_io_done, struct hio *, nio,
2194 2192 int, bioret, size_t, data);
2195 2193
2196 2194 /*
2197 2195 * Copy data and signal success to all the bufs
2198 2196 * which can be fully satisfied from b_resid.
2199 2197 */
2200 2198 while (nio != NULL && data >= nio->bp->b_bcount) {
2201 2199 offset = ldbtob(nio->bp->b_lblkno) - soffset;
2202 2200 bcopy(iodata + offset, nio->bp->b_un.b_addr,
2203 2201 nio->bp->b_bcount);
2204 2202 data -= nio->bp->b_bcount;
2205 2203 bioerror(nio->bp, 0);
2206 2204 biodone(nio->bp);
2207 2205 sema_v(nio->sema);
2208 2206 tio = nio;
2209 2207 nio = nio->contig_chain;
2210 2208 kmem_cache_free(hio_cache, tio);
2211 2209 }
2212 2210
2213 2211 /*
2214 2212 * Signal error to all the leftover bufs (if any)
2215 2213 * after b_resid data is exhausted.
2216 2214 */
2217 2215 while (nio != NULL) {
2218 2216 nio->bp->b_resid = nio->bp->b_bcount - data;
2219 2217 bzero(nio->bp->b_un.b_addr + data, nio->bp->b_resid);
2220 2218 bioerror(nio->bp, bioret);
2221 2219 biodone(nio->bp);
2222 2220 sema_v(nio->sema);
2223 2221 tio = nio;
2224 2222 nio = nio->contig_chain;
2225 2223 kmem_cache_free(hio_cache, tio);
2226 2224 data = 0;
2227 2225 }
2228 2226 kmem_free(iodata, bsize);
2229 2227 } else {
2230 2228
2231 2229 nbuf = tio->bp;
2232 2230 io_done = tio->sema;
2233 2231 nio = fio;
2234 2232 last = tio;
2235 2233
2236 2234 while (nio != NULL) {
2237 2235 (void) bdev_strategy(nio->bp);
2238 2236 nio = nio->contig_chain;
2239 2237 }
2240 2238 nio = fio;
2241 2239 mutex_exit(&hqueue->strategy_lock);
2242 2240
2243 2241 while (nio != NULL) {
2244 2242 if (nio == last) {
2245 2243 (void) biowait(nbuf);
2246 2244 sema_v(io_done);
2247 2245 break;
2248 2246 /* sentinel last not freed. See above. */
2249 2247 } else {
2250 2248 (void) biowait(nio->bp);
2251 2249 sema_v(nio->sema);
2252 2250 }
2253 2251 tio = nio;
2254 2252 nio = nio->contig_chain;
2255 2253 kmem_cache_free(hio_cache, tio);
2256 2254 }
2257 2255 }
2258 2256 return (0);
2259 2257 }
2260 2258
2261 2259 /*
2262 2260 * Insert an I/O request in the I/O scheduler's pipeline
2263 2261 * Using AVL tree makes it easy to reorder the I/O request
2264 2262 * based on logical block number.
2265 2263 */
2266 2264 static void
2267 2265 hsched_enqueue_io(struct hsfs *fsp, struct hio *hsio, int ra)
2268 2266 {
2269 2267 struct hsfs_queue *hqueue = fsp->hqueue;
2270 2268
2271 2269 mutex_enter(&hqueue->hsfs_queue_lock);
2272 2270
2273 2271 fsp->physical_read_bytes += hsio->bp->b_bcount;
2274 2272 if (ra)
2275 2273 fsp->readahead_bytes += hsio->bp->b_bcount;
2276 2274
2277 2275 avl_add(&hqueue->deadline_tree, hsio);
2278 2276 avl_add(&hqueue->read_tree, hsio);
2279 2277
2280 2278 DTRACE_PROBE3(hsfs_io_enqueued, struct hio *, hsio,
2281 2279 struct hsfs_queue *, hqueue, int, ra);
2282 2280
2283 2281 mutex_exit(&hqueue->hsfs_queue_lock);
2284 2282 }
2285 2283
2286 2284 /* ARGSUSED */
2287 2285 static int
2288 2286 hsfs_pathconf(struct vnode *vp, int cmd, ulong_t *valp, struct cred *cr,
2289 2287 caller_context_t *ct)
2290 2288 {
2291 2289 struct hsfs *fsp;
2292 2290
2293 2291 int error = 0;
2294 2292
2295 2293 switch (cmd) {
2296 2294
2297 2295 case _PC_NAME_MAX:
2298 2296 fsp = VFS_TO_HSFS(vp->v_vfsp);
2299 2297 *valp = fsp->hsfs_namemax;
2300 2298 break;
2301 2299
2302 2300 case _PC_FILESIZEBITS:
2303 2301 *valp = 33; /* Without multi extent support: 4 GB - 2k */
2304 2302 break;
2305 2303
2306 2304 case _PC_TIMESTAMP_RESOLUTION:
2307 2305 /*
2308 2306 * HSFS keeps, at best, 1/100 second timestamp resolution.
2309 2307 */
2310 2308 *valp = 10000000L;
2311 2309 break;
2312 2310
2313 2311 default:
2314 2312 error = fs_pathconf(vp, cmd, valp, cr, ct);
2315 2313 break;
2316 2314 }
2317 2315
2318 2316 return (error);
2319 2317 }
2320 2318
2321 2319
2322 2320
2323 2321 const fs_operation_def_t hsfs_vnodeops_template[] = {
2324 2322 VOPNAME_OPEN, { .vop_open = hsfs_open },
2325 2323 VOPNAME_CLOSE, { .vop_close = hsfs_close },
2326 2324 VOPNAME_READ, { .vop_read = hsfs_read },
2327 2325 VOPNAME_GETATTR, { .vop_getattr = hsfs_getattr },
2328 2326 VOPNAME_ACCESS, { .vop_access = hsfs_access },
2329 2327 VOPNAME_LOOKUP, { .vop_lookup = hsfs_lookup },
2330 2328 VOPNAME_READDIR, { .vop_readdir = hsfs_readdir },
2331 2329 VOPNAME_READLINK, { .vop_readlink = hsfs_readlink },
2332 2330 VOPNAME_FSYNC, { .vop_fsync = hsfs_fsync },
2333 2331 VOPNAME_INACTIVE, { .vop_inactive = hsfs_inactive },
2334 2332 VOPNAME_FID, { .vop_fid = hsfs_fid },
2335 2333 VOPNAME_SEEK, { .vop_seek = hsfs_seek },
2336 2334 VOPNAME_FRLOCK, { .vop_frlock = hsfs_frlock },
2337 2335 VOPNAME_GETPAGE, { .vop_getpage = hsfs_getpage },
2338 2336 VOPNAME_PUTPAGE, { .vop_putpage = hsfs_putpage },
2339 2337 VOPNAME_MAP, { .vop_map = hsfs_map },
2340 2338 VOPNAME_ADDMAP, { .vop_addmap = hsfs_addmap },
2341 2339 VOPNAME_DELMAP, { .vop_delmap = hsfs_delmap },
2342 2340 VOPNAME_PATHCONF, { .vop_pathconf = hsfs_pathconf },
2343 2341 NULL, NULL
2344 2342 };
2345 2343
2346 2344 struct vnodeops *hsfs_vnodeops;
|
↓ open down ↓ |
1153 lines elided |
↑ open up ↑ |
XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX