/*
 * CDDL HEADER START
 *
 * The contents of this file are subject to the terms of the
 * Common Development and Distribution License (the "License").
 * You may not use this file except in compliance with the License.
 *
 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
 * or http://www.opensolaris.org/os/licensing.
 * See the License for the specific language governing permissions
 * and limitations under the License.
 *
 * When distributing Covered Code, include this CDDL HEADER in each
 * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
 * If applicable, add the following below this CDDL HEADER, with the
 * fields enclosed by brackets "[]" replaced with your own identifying
 * information: Portions Copyright [yyyy] [name of copyright owner]
 *
 * CDDL HEADER END
 */

/*
 * Copyright 2013 OmniTI Computer Consulting, Inc. All rights reserved.
 * Copyright (c) 2014, Joyent, Inc. All rights reserved.
 */

/*
 * Copyright 2009 Sun Microsystems, Inc. All rights reserved.
 * Use is subject to license terms.
 */

/* Copyright (c) 1984, 1986, 1987, 1988, 1989 AT&T */
/* All Rights Reserved */

#include <sys/types.h>
#include <sys/inttypes.h>
#include <sys/param.h>
#include <sys/sysmacros.h>
#include <sys/systm.h>
#include <sys/signal.h>
#include <sys/user.h>
#include <sys/errno.h>
#include <sys/var.h>
#include <sys/proc.h>
#include <sys/tuneable.h>
#include <sys/debug.h>
#include <sys/cmn_err.h>
#include <sys/cred.h>
#include <sys/vnode.h>
#include <sys/vfs.h>
#include <sys/vm.h>
#include <sys/file.h>
#include <sys/mman.h>
#include <sys/vmparam.h>
#include <sys/fcntl.h>
#include <sys/lwpchan_impl.h>
#include <sys/nbmlock.h>
#include <sys/brand.h>

#include <vm/hat.h>
#include <vm/as.h>
#include <vm/seg.h>
#include <vm/seg_dev.h>
#include <vm/seg_vn.h>

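/*
 * Tunables: when non-zero (the default), brk() and stack-grow requests in
 * processes with automatic large pages enabled (SAUTOLPG) will try to use
 * large pages for the heap and stack respectively.
 */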
int use_brk_lpg = 1;
int use_stk_lpg = 1;

static int brk_lpg(caddr_t nva);
static int grow_lpg(caddr_t sp);

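/*
 * brk(2) system call handler: set the end of the process heap to nva.
 * Dispatches to the large-page path when automatic large pages are in use.
 */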
int
brk(caddr_t nva)
{
	int error;
	proc_t *p = curproc;

	/*
	 * Serialize brk operations on an address space.
	 * This also serves as the lock protecting p_brksize
	 * and p_brkpageszc.
	 */
	as_rangelock(p->p_as);
	if (use_brk_lpg && (p->p_flag & SAUTOLPG) != 0) {
		error = brk_lpg(nva);
	} else {
		error = brk_internal(nva, p->p_brkpageszc);
	}
	as_rangeunlock(p->p_as);
	return ((error != 0 ? set_errno(error) : 0));
}

/*
 * Algorithm: call arch-specific map_pgsz to get best page size to use,
 * then call brk_internal().
 * Returns 0 on success.
 */
static int
brk_lpg(caddr_t nva)
{
	struct proc *p = curproc;
	size_t pgsz, len;
	caddr_t addr, brkend;
	caddr_t bssbase = p->p_bssbase;
	caddr_t brkbase = p->p_brkbase;
	int oszc, szc;
	int err;

	oszc = p->p_brkpageszc;

	/*
	 * If p_brkbase has not yet been set, the first call
	 * to brk_internal() will initialize it.
	 */
	if (brkbase == 0) {
		return (brk_internal(nva, oszc));
	}

	len = nva - bssbase;

	pgsz = map_pgsz(MAPPGSZ_HEAP, p, bssbase, len, 0);
	szc = page_szc(pgsz);

	/*
	 * Covers two cases:
	 * 1. page_szc() returns -1 for invalid page size, so we want to
	 * ignore it in that case.
	 * 2. By design we never decrease page size, as it is more stable.
	 */
	if (szc <= oszc) {
		err = brk_internal(nva, oszc);
		/* If that failed, back off to base page size. */
		if (err != 0 && oszc != 0) {
			err = brk_internal(nva, 0);
		}
		return (err);
	}

	err = brk_internal(nva, szc);
	/* If using szc failed, map with base page size and return. */
	if (err != 0) {
		if (szc != 0) {
			err = brk_internal(nva, 0);
		}
		return (err);
	}

	/*
	 * Round up brk base to a large page boundary and remap
	 * anything in the segment already faulted in beyond that
	 * point.
	 */
	addr = (caddr_t)P2ROUNDUP((uintptr_t)p->p_bssbase, pgsz);
	brkend = brkbase + p->p_brksize;
	len = brkend - addr;
	/* Check that len is not negative. Update page size code for heap. */
	if (addr >= p->p_bssbase && brkend > addr && IS_P2ALIGNED(len, pgsz)) {
		(void) as_setpagesize(p->p_as, addr, len, szc, B_FALSE);
		p->p_brkpageszc = szc;
	}

	ASSERT(err == 0);
	return (err);		/* should always be 0 */
}

/*
 * Returns 0 on success.
 */
int
brk_internal(caddr_t nva, uint_t brkszc)
{
	caddr_t ova;			/* current break address */
	size_t size;
	int error;
	struct proc *p = curproc;
	struct as *as = p->p_as;
	size_t pgsz;
	uint_t szc;
	rctl_qty_t as_rctl;

	/*
	 * extend heap to brkszc alignment but use current p->p_brkpageszc
	 * for the newly created segment. This allows the new extension
	 * segment to be concatenated successfully with the existing brk
	 * segment.
	 */
	if ((szc = brkszc) != 0) {
		pgsz = page_get_pagesize(szc);
		ASSERT(pgsz > PAGESIZE);
	} else {
		pgsz = PAGESIZE;
	}

	mutex_enter(&p->p_lock);
	as_rctl = rctl_enforced_value(rctlproc_legacy[RLIMIT_DATA],
	    p->p_rctls, p);
	mutex_exit(&p->p_lock);

	/*
	 * If p_brkbase has not yet been set, the first call
	 * to brk() will initialize it.
	 */
	if (p->p_brkbase == 0)
		p->p_brkbase = nva;

	/*
	 * Before multiple page size support existed p_brksize was the value
	 * not rounded to the pagesize (i.e. it stored the exact user request
	 * for heap size). If pgsz is greater than PAGESIZE, calculate the
	 * heap size as the real new heap size by rounding it up to pgsz.
	 * This is useful since we may want to know where the heap ends
	 * without knowing the heap pagesize (e.g. some old code), and also,
	 * if the heap pagesize changes, we can update p_brkpageszc but delay
	 * adding the new mapping yet still know from p_brksize where the
	 * heap really ends. The user-requested heap end is stored in a libc
	 * variable.
	 */
	if (pgsz > PAGESIZE) {
		caddr_t tnva = (caddr_t)P2ROUNDUP((uintptr_t)nva, pgsz);
		size = tnva - p->p_brkbase;
		if (tnva < p->p_brkbase || (size > p->p_brksize &&
		    size > (size_t)as_rctl)) {
			szc = 0;
			pgsz = PAGESIZE;
			size = nva - p->p_brkbase;
		}
	} else {
		size = nva - p->p_brkbase;
	}

	/*
	 * Use PAGESIZE to round up ova because we want to know the real
	 * value of the current heap end in case p_brkpageszc changed since
	 * the last time p_brksize was computed.
	 */
	nva = (caddr_t)P2ROUNDUP((uintptr_t)nva, pgsz);
	ova = (caddr_t)P2ROUNDUP((uintptr_t)(p->p_brkbase + p->p_brksize),
	    PAGESIZE);

	if ((nva < p->p_brkbase) || (size > p->p_brksize &&
	    size > as_rctl)) {
		mutex_enter(&p->p_lock);
		(void) rctl_action(rctlproc_legacy[RLIMIT_DATA], p->p_rctls, p,
		    RCA_SAFE);
		mutex_exit(&p->p_lock);
		return (ENOMEM);
	}

	if (nva > ova) {
		struct segvn_crargs crargs =
		    SEGVN_ZFOD_ARGS(PROT_ZFOD, PROT_ALL);

		if (!(p->p_datprot & PROT_EXEC)) {
			crargs.prot &= ~PROT_EXEC;
		}

		/*
		 * Add new zfod mapping to extend UNIX data segment
		 * AS_MAP_NO_LPOOB means use 0, and don't reapply OOB policies
		 * via map_pgszcvec(). Use AS_MAP_HEAP to get intermediate
		 * page sizes if ova is not aligned to szc's pgsz.
		 */
		if (szc > 0) {
			caddr_t rbss;

			rbss = (caddr_t)P2ROUNDUP((uintptr_t)p->p_bssbase,
			    pgsz);
			if (IS_P2ALIGNED(p->p_bssbase, pgsz) || ova > rbss) {
				crargs.szc = p->p_brkpageszc ? p->p_brkpageszc :
				    AS_MAP_NO_LPOOB;
			} else if (ova == rbss) {
				crargs.szc = szc;
			} else {
				crargs.szc = AS_MAP_HEAP;
			}
		} else {
			crargs.szc = AS_MAP_NO_LPOOB;
		}
		crargs.lgrp_mem_policy_flags = LGRP_MP_FLAG_EXTEND_UP;
		error = as_map(as, ova, (size_t)(nva - ova), segvn_create,
		    &crargs);
		if (error) {
			return (error);
		}

	} else if (nva < ova) {
		/*
		 * Release mapping to shrink UNIX data segment.
		 */
		(void) as_unmap(as, nva, (size_t)(ova - nva));
	}
	p->p_brksize = size;
	return (0);
}

/*
 * Grow the stack to include sp. Return 1 if successful, 0 otherwise.
 * This routine assumes that the stack grows downward.
 */
int
grow(caddr_t sp)
{
	struct proc *p = curproc;
	struct as *as = p->p_as;
	size_t oldsize = p->p_stksize;
	size_t newsize;
	int err;

	/*
	 * Serialize grow operations on an address space.
	 * This also serves as the lock protecting p_stksize
	 * and p_stkpageszc.
	 */
	as_rangelock(as);
	if (use_stk_lpg && (p->p_flag & SAUTOLPG) != 0) {
		err = grow_lpg(sp);
	} else {
		err = grow_internal(sp, p->p_stkpageszc);
	}
	as_rangeunlock(as);

	if (err == 0 && (newsize = p->p_stksize) > oldsize) {
		ASSERT(IS_P2ALIGNED(oldsize, PAGESIZE));
		ASSERT(IS_P2ALIGNED(newsize, PAGESIZE));
		/*
		 * Set up translations so the process doesn't have to fault in
		 * the stack pages we just gave it.
		 */
		(void) as_fault(as->a_hat, as, p->p_usrstack - newsize,
		    newsize - oldsize, F_INVAL, S_WRITE);
	}
	return ((err == 0 ? 1 : 0));
}

/*
 * Algorithm: call arch-specific map_pgsz to get best page size to use,
 * then call grow_internal().
 * Returns 0 on success.
 */
static int
grow_lpg(caddr_t sp)
{
	struct proc *p = curproc;
	size_t pgsz;
	size_t len, newsize;
	caddr_t addr, saddr;
	caddr_t growend;
	int oszc, szc;
	int err;

	newsize = p->p_usrstack - sp;

	oszc = p->p_stkpageszc;
	pgsz = map_pgsz(MAPPGSZ_STK, p, sp, newsize, 0);
	szc = page_szc(pgsz);

	/*
	 * Covers two cases:
	 * 1. page_szc() returns -1 for invalid page size, so we want to
	 * ignore it in that case.
	 * 2. By design we never decrease page size, as it is more stable.
	 * This shouldn't happen as the stack never shrinks.
	 */
	if (szc <= oszc) {
		err = grow_internal(sp, oszc);
		/* failed, fall back to base page size */
		if (err != 0 && oszc != 0) {
			err = grow_internal(sp, 0);
		}
		return (err);
	}

	/*
	 * We've grown sufficiently to switch to a new page size.
	 * So we are going to remap the whole segment with the new page size.
	 */
	err = grow_internal(sp, szc);
	/* The grow with szc failed, so fall back to base page size. */
	if (err != 0) {
		if (szc != 0) {
			err = grow_internal(sp, 0);
		}
		return (err);
	}

	/*
	 * Round up stack pointer to a large page boundary and remap
	 * any pgsz pages in the segment already faulted in beyond that
	 * point.
	 */
	saddr = p->p_usrstack - p->p_stksize;
	addr = (caddr_t)P2ROUNDUP((uintptr_t)saddr, pgsz);
	growend = (caddr_t)P2ALIGN((uintptr_t)p->p_usrstack, pgsz);
	len = growend - addr;
	/* Check that len is not negative. Update page size code for stack. */
	if (addr >= saddr && growend > addr && IS_P2ALIGNED(len, pgsz)) {
		(void) as_setpagesize(p->p_as, addr, len, szc, B_FALSE);
		p->p_stkpageszc = szc;
	}

	ASSERT(err == 0);
	return (err);		/* should always be 0 */
}

/*
 * This routine assumes that the stack grows downward.
 * Returns 0 on success, errno on failure.
 */
int
grow_internal(caddr_t sp, uint_t growszc)
{
	struct proc *p = curproc;
	size_t newsize;
	size_t oldsize;
	int error;
	size_t pgsz;
	uint_t szc;
	struct segvn_crargs crargs = SEGVN_ZFOD_ARGS(PROT_ZFOD, PROT_ALL);

	ASSERT(sp < p->p_usrstack);
	sp = (caddr_t)P2ALIGN((uintptr_t)sp, PAGESIZE);

	/*
	 * grow to growszc alignment but use current p->p_stkpageszc for
	 * the segvn_crargs szc passed to segvn_create. For memcntl to
	 * increase the szc, this allows the new extension segment to be
	 * concatenated successfully with the existing stack segment.
	 */
	if ((szc = growszc) != 0) {
		pgsz = page_get_pagesize(szc);
		ASSERT(pgsz > PAGESIZE);
		newsize = p->p_usrstack - (caddr_t)P2ALIGN((uintptr_t)sp, pgsz);
		if (newsize > (size_t)p->p_stk_ctl) {
			szc = 0;
			pgsz = PAGESIZE;
			newsize = p->p_usrstack - sp;
		}
	} else {
		pgsz = PAGESIZE;
		newsize = p->p_usrstack - sp;
	}

	if (newsize > (size_t)p->p_stk_ctl) {
		(void) rctl_action(rctlproc_legacy[RLIMIT_STACK], p->p_rctls, p,
		    RCA_UNSAFE_ALL);

		return (ENOMEM);
	}

	oldsize = p->p_stksize;
	ASSERT(P2PHASE(oldsize, PAGESIZE) == 0);

	if (newsize <= oldsize) {	/* prevent the stack from shrinking */
		return (0);
	}

	if (!(p->p_stkprot & PROT_EXEC)) {
		crargs.prot &= ~PROT_EXEC;
	}
	/*
	 * extend stack with the proposed new growszc, which is different
	 * from p_stkpageszc only on a memcntl to increase the stack pagesize.
	 * AS_MAP_NO_LPOOB means use 0, and don't reapply OOB policies via
	 * map_pgszcvec(). Use AS_MAP_STACK to get intermediate page sizes
	 * if not aligned to szc's pgsz.
	 */
	if (szc > 0) {
		caddr_t oldsp = p->p_usrstack - oldsize;
		caddr_t austk = (caddr_t)P2ALIGN((uintptr_t)p->p_usrstack,
		    pgsz);

		if (IS_P2ALIGNED(p->p_usrstack, pgsz) || oldsp < austk) {
			crargs.szc = p->p_stkpageszc ? p->p_stkpageszc :
			    AS_MAP_NO_LPOOB;
		} else if (oldsp == austk) {
			crargs.szc = szc;
		} else {
			crargs.szc = AS_MAP_STACK;
		}
	} else {
		crargs.szc = AS_MAP_NO_LPOOB;
	}
	crargs.lgrp_mem_policy_flags = LGRP_MP_FLAG_EXTEND_DOWN;

	if ((error = as_map(p->p_as, p->p_usrstack - newsize, newsize - oldsize,
	    segvn_create, &crargs)) != 0) {
		if (error == EAGAIN) {
			cmn_err(CE_WARN, "Sorry, no swap space to grow stack "
			    "for pid %d (%s)", p->p_pid, PTOU(p)->u_comm);
		}
		return (error);
	}
	p->p_stksize = newsize;
	return (0);
}

/*
 * Find address for user to map.
 * If MAP_FIXED is not specified, we can pick any address we want, but we will
 * first try the value in *addrp if it is non-NULL. Thus this implements a
 * way to try to get a preferred address.
 */
int
choose_addr(struct as *as, caddr_t *addrp, size_t len, offset_t off,
    int vacalign, uint_t flags)
{
	caddr_t basep = (caddr_t)(uintptr_t)((uintptr_t)*addrp & PAGEMASK);
	size_t lenp = len;

	ASSERT(AS_ISCLAIMGAP(as));	/* searches should be serialized */
	if (flags & MAP_FIXED) {
		(void) as_unmap(as, *addrp, len);
		return (0);
	} else if (basep != NULL && ((flags & MAP_ALIGN) == 0) &&
	    !as_gap(as, len, &basep, &lenp, 0, *addrp)) {
		/* User supplied address was available */
		*addrp = basep;
	} else {
		/*
		 * No user supplied address or the address supplied was not
		 * available.
		 */
		map_addr(addrp, len, off, vacalign, flags);
	}
	if (*addrp == NULL)
		return (ENOMEM);
	return (0);
}

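/*
 * Return the upper address-space limit to honor for a new mapping: the
 * 32-bit limit when _MAP_LOW32 is requested (possibly overridden by a
 * branded process's b_map32limit hook), otherwise the full user limit
 * of the address space.
 */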
caddr_t
map_userlimit(proc_t *pp, struct as *as, int flags)
{
	if (flags & _MAP_LOW32) {
		if (PROC_IS_BRANDED(pp) && BROP(pp)->b_map32limit != NULL) {
			return ((caddr_t)(uintptr_t)BROP(pp)->b_map32limit(pp));
		} else {
			return ((caddr_t)_userlimit32);
		}
	}

	return (as->a_userlimit);
}

/*
 * Used for MAP_ANON - fast way to get anonymous pages
 */
static int
zmap(struct as *as, caddr_t *addrp, size_t len, uint_t uprot, int flags,
    offset_t pos)
{
	struct segvn_crargs vn_a;
	int error;

	if ((PROT_ALL & uprot) != uprot)
		return (EACCES);

	if ((flags & MAP_FIXED) != 0) {
		/*
		 * Use the user address. First verify that
		 * the address to be used is page aligned.
		 * Then make some simple bounds checks.
		 */
		if (((uintptr_t)*addrp & PAGEOFFSET) != 0)
			return (EINVAL);

		switch (valid_usr_range(*addrp, len, uprot, as,
		    map_userlimit(as->a_proc, as, flags))) {
		case RANGE_OKAY:
			break;
		case RANGE_BADPROT:
			return (ENOTSUP);
		case RANGE_BADADDR:
		default:
			return (ENOMEM);
		}
	}
	/*
	 * No need to worry about vac alignment for anonymous
	 * pages since this is a "clone" object that doesn't
	 * yet exist.
	 */
	error = choose_addr(as, addrp, len, pos, ADDR_NOVACALIGN, flags);
	if (error != 0) {
		return (error);
	}

	/*
	 * Use the seg_vn segment driver; passing in the NULL amp
	 * gives the desired "cloning" effect.
	 */
	vn_a.vp = NULL;
	vn_a.offset = 0;
	vn_a.type = flags & MAP_TYPE;
	vn_a.prot = uprot;
	vn_a.maxprot = PROT_ALL;
	vn_a.flags = flags & ~MAP_TYPE;
	vn_a.cred = CRED();
	vn_a.amp = NULL;
	vn_a.szc = 0;
	vn_a.lgrp_mem_policy_flags = 0;

	return (as_map(as, *addrp, len, segvn_create, &vn_a));
}

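/*
 * Common guts of the mmap system calls below: validates flags, protections,
 * alignment, and address ranges, then either maps anonymous memory via
 * zmap() or hands the work to the vnode's VOP_MAP routine.
 */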
static int
smmap_common(caddr_t *addrp, size_t len,
    int prot, int flags, struct file *fp, offset_t pos)
{
	struct vnode *vp;
	struct as *as = curproc->p_as;
	uint_t uprot, maxprot, type;
	int error;
	int in_crit = 0;

	if ((flags & ~(MAP_SHARED | MAP_PRIVATE | MAP_FIXED | _MAP_NEW |
	    _MAP_LOW32 | MAP_NORESERVE | MAP_ANON | MAP_ALIGN |
	    MAP_TEXT | MAP_INITDATA)) != 0) {
		/* | MAP_RENAME */	/* not implemented, let user know */
		return (EINVAL);
	}

	if ((flags & MAP_TEXT) && !(prot & PROT_EXEC)) {
		return (EINVAL);
	}

	if ((flags & (MAP_TEXT | MAP_INITDATA)) == (MAP_TEXT | MAP_INITDATA)) {
		return (EINVAL);
	}

#if defined(__sparc)
	/*
	 * See if this is an "old mmap call". If so, remember this
	 * fact and convert the flags value given to mmap to indicate
	 * the specified address in the system call must be used.
	 * _MAP_NEW is set by all new uses of mmap.
	 */
	if ((flags & _MAP_NEW) == 0)
		flags |= MAP_FIXED;
#endif
	flags &= ~_MAP_NEW;

	type = flags & MAP_TYPE;
	if (type != MAP_PRIVATE && type != MAP_SHARED)
		return (EINVAL);

	if (flags & MAP_ALIGN) {
		if (flags & MAP_FIXED)
			return (EINVAL);

		/* alignment needs to be a power of 2 >= page size */
		if (((uintptr_t)*addrp < PAGESIZE && (uintptr_t)*addrp != 0) ||
		    !ISP2((uintptr_t)*addrp))
			return (EINVAL);
	}
	/*
	 * Check for bad lengths and file position.
	 * We let the VOP_MAP routine check for negative lengths
	 * since on some vnode types this might be appropriate.
	 */
	if (len == 0 || (pos & (u_offset_t)PAGEOFFSET) != 0)
		return (EINVAL);

	maxprot = PROT_ALL;		/* start out allowing all accesses */
	uprot = prot | PROT_USER;

	if (fp == NULL) {
		ASSERT(flags & MAP_ANON);
		/* discard lwpchan mappings, like munmap() */
		if ((flags & MAP_FIXED) && curproc->p_lcp != NULL)
			lwpchan_delete_mapping(curproc, *addrp, *addrp + len);
		as_rangelock(as);
		error = zmap(as, addrp, len, uprot, flags, pos);
		as_rangeunlock(as);
		/*
		 * Tell machine specific code that lwp has mapped shared memory
		 */
		if (error == 0 && (flags & MAP_SHARED)) {
			/* EMPTY */
			LWP_MMODEL_SHARED_AS(*addrp, len);
		}
		return (error);
	} else if ((flags & MAP_ANON) != 0)
		return (EINVAL);

	vp = fp->f_vnode;

	/* Can't execute code from "noexec" mounted filesystem. */
	if ((vp->v_vfsp->vfs_flag & VFS_NOEXEC) != 0)
		maxprot &= ~PROT_EXEC;

	/*
	 * These checks were added as part of large file support.
	 *
	 * Return ENXIO if the initial position is negative; return EOVERFLOW
	 * if (offset + len) would overflow the maximum allowed offset for the
	 * type of file descriptor being used.
	 */
	if (vp->v_type == VREG) {
		if (pos < 0)
			return (ENXIO);
		if ((offset_t)len > (OFFSET_MAX(fp) - pos))
			return (EOVERFLOW);
	}

	if (type == MAP_SHARED && (fp->f_flag & FWRITE) == 0) {
		/* no write access allowed */
		maxprot &= ~PROT_WRITE;
	}

	/*
	 * XXX - Do we also adjust maxprot based on protections
	 * of the vnode? E.g. if no execute permission is given
	 * on the vnode for the current user, maxprot probably
	 * should disallow PROT_EXEC also? This is different
	 * from the write access as this would be a per vnode
	 * test as opposed to a per fd test for writability.
	 */

	/*
	 * Verify that the specified protections are not greater than
	 * the maximum allowable protections. Also test to make sure
	 * that the file descriptor allows read access, since "write
	 * only" mappings are hard to do: normally we read from the
	 * file before the page can be written.
	 */
	if (((maxprot & uprot) != uprot) || (fp->f_flag & FREAD) == 0)
		return (EACCES);

	/*
	 * If the user specified an address, do some simple checks here
	 */
	if ((flags & MAP_FIXED) != 0) {
		/*
		 * Use the user address. First verify that
		 * the address to be used is page aligned.
		 * Then make some simple bounds checks.
		 */
		if (((uintptr_t)*addrp & PAGEOFFSET) != 0)
			return (EINVAL);
		switch (valid_usr_range(*addrp, len, uprot, as,
		    map_userlimit(curproc, as, flags))) {
		case RANGE_OKAY:
			break;
		case RANGE_BADPROT:
			return (ENOTSUP);
		case RANGE_BADADDR:
		default:
			return (ENOMEM);
		}
	}

	if ((prot & (PROT_READ | PROT_WRITE | PROT_EXEC)) &&
	    nbl_need_check(vp)) {
		int svmand;
		nbl_op_t nop;

		nbl_start_crit(vp, RW_READER);
		in_crit = 1;
		error = nbl_svmand(vp, fp->f_cred, &svmand);
		if (error != 0)
			goto done;
		if ((prot & PROT_WRITE) && (type == MAP_SHARED)) {
			if (prot & (PROT_READ | PROT_EXEC)) {
				nop = NBL_READWRITE;
			} else {
				nop = NBL_WRITE;
			}
		} else {
			nop = NBL_READ;
		}
		if (nbl_conflict(vp, nop, 0, LONG_MAX, svmand, NULL)) {
			error = EACCES;
			goto done;
		}
	}

	/* discard lwpchan mappings, like munmap() */
	if ((flags & MAP_FIXED) && curproc->p_lcp != NULL)
		lwpchan_delete_mapping(curproc, *addrp, *addrp + len);

	/*
	 * Ok, now let the vnode map routine do its thing to set things up.
	 */
	error = VOP_MAP(vp, pos, as,
	    addrp, len, uprot, maxprot, flags, fp->f_cred, NULL);

	if (error == 0) {
		/*
		 * Tell machine specific code that lwp has mapped shared memory
		 */
		if (flags & MAP_SHARED) {
			/* EMPTY */
			LWP_MMODEL_SHARED_AS(*addrp, len);
		}
		if (vp->v_type == VREG &&
		    (flags & (MAP_TEXT | MAP_INITDATA)) != 0) {
			/*
			 * Mark this as an executable vnode
			 */
			mutex_enter(&vp->v_lock);
			vp->v_flag |= VVMEXEC;
			mutex_exit(&vp->v_lock);
		}
	}

done:
	if (in_crit)
		nbl_end_crit(vp);
	return (error);
}

#ifdef _LP64
/*
 * LP64 mmap(2) system call: 64-bit offset, 64-bit address.
 *
 * The "large file" mmap routine mmap64(2) is also mapped to this routine
 * by the 64-bit version of libc.
 *
 * Eventually, this should be the only version, and have smmap_common()
 * folded back into it again. Some day.
 */
caddr_t
smmap64(caddr_t addr, size_t len, int prot, int flags, int fd, off_t pos)
{
	struct file *fp;
	int error;

	if (fd == -1 && (flags & MAP_ANON) != 0)
		error = smmap_common(&addr, len, prot, flags,
		    NULL, (offset_t)pos);
	else if ((fp = getf(fd)) != NULL) {
		error = smmap_common(&addr, len, prot, flags,
		    fp, (offset_t)pos);
		releasef(fd);
	} else
		error = EBADF;

	return (error ? (caddr_t)(uintptr_t)set_errno(error) : addr);
}
#endif	/* _LP64 */

#if defined(_SYSCALL32_IMPL) || defined(_ILP32)

/*
 * ILP32 mmap(2) system call: 32-bit offset, 32-bit address.
 */
caddr_t
smmap32(caddr32_t addr, size32_t len, int prot, int flags, int fd, off32_t pos)
{
	struct file *fp;
	int error;
	caddr_t a = (caddr_t)(uintptr_t)addr;

	if (flags & _MAP_LOW32)
		error = EINVAL;
	else if (fd == -1 && (flags & MAP_ANON) != 0)
		error = smmap_common(&a, (size_t)len, prot,
		    flags | _MAP_LOW32, NULL, (offset_t)pos);
	else if ((fp = getf(fd)) != NULL) {
		error = smmap_common(&a, (size_t)len, prot,
		    flags | _MAP_LOW32, fp, (offset_t)pos);
		releasef(fd);
	} else
		error = EBADF;

	ASSERT(error != 0 || (uintptr_t)(a + len) < (uintptr_t)UINT32_MAX);

	return (error ? (caddr_t)(uintptr_t)set_errno(error) : a);
}

/*
 * ILP32 mmap64(2) system call: 64-bit offset, 32-bit address.
 *
 * Now things really get ugly because we can't use the C-style
 * calling convention for more than 6 args, and 64-bit parameter
 * passing on 32-bit systems is less than clean.
 */

struct mmaplf32a {
	caddr_t addr;
	size_t len;
#ifdef _LP64
	/*
	 * 32-bit contents, 64-bit cells
	 */
	uint64_t prot;
	uint64_t flags;
	uint64_t fd;
	uint64_t offhi;
	uint64_t offlo;
#else
	/*
	 * 32-bit contents, 32-bit cells
	 */
	uint32_t prot;
	uint32_t flags;
	uint32_t fd;
	uint32_t offhi;
	uint32_t offlo;
#endif
};

int
smmaplf32(struct mmaplf32a *uap, rval_t *rvp)
{
	struct file *fp;
	int error;
	caddr_t a = uap->addr;
	int flags = (int)uap->flags;
	int fd = (int)uap->fd;
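	/*
	 * Assemble the 64-bit offset from its two 32-bit argument words.
	 * Note that on little-endian systems the halves arrive in the
	 * opposite order, so offlo actually carries the high word there.
	 */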
#ifdef _BIG_ENDIAN
	offset_t off = ((u_offset_t)uap->offhi << 32) | (u_offset_t)uap->offlo;
#else
	offset_t off = ((u_offset_t)uap->offlo << 32) | (u_offset_t)uap->offhi;
#endif

	if (flags & _MAP_LOW32)
		error = EINVAL;
	else if (fd == -1 && (flags & MAP_ANON) != 0)
		error = smmap_common(&a, uap->len, (int)uap->prot,
		    flags | _MAP_LOW32, NULL, off);
	else if ((fp = getf(fd)) != NULL) {
		error = smmap_common(&a, uap->len, (int)uap->prot,
		    flags | _MAP_LOW32, fp, off);
		releasef(fd);
	} else
		error = EBADF;

	if (error == 0)
		rvp->r_val1 = (uintptr_t)a;
	return (error);
}

#endif	/* _SYSCALL32_IMPL || _ILP32 */

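/*
 * munmap(2) system call: unmap a page-aligned range of the user address
 * space, discarding any lwpchan mappings that fall within it.
 */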
int
munmap(caddr_t addr, size_t len)
{
	struct proc *p = curproc;
	struct as *as = p->p_as;

	if (((uintptr_t)addr & PAGEOFFSET) != 0 || len == 0)
		return (set_errno(EINVAL));

	if (valid_usr_range(addr, len, 0, as, as->a_userlimit) != RANGE_OKAY)
		return (set_errno(EINVAL));

	/*
	 * Discard lwpchan mappings.
	 */
	if (p->p_lcp != NULL)
		lwpchan_delete_mapping(p, addr, addr + len);
	if (as_unmap(as, addr, len) != 0)
		return (set_errno(EINVAL));

	return (0);
}

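/*
 * mprotect(2) system call: change the protections on a page-aligned range
 * of the user address space.
 */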
int
mprotect(caddr_t addr, size_t len, int prot)
{
	struct as *as = curproc->p_as;
	uint_t uprot = prot | PROT_USER;
	int error;

	if (((uintptr_t)addr & PAGEOFFSET) != 0 || len == 0)
		return (set_errno(EINVAL));

	switch (valid_usr_range(addr, len, prot, as, as->a_userlimit)) {
	case RANGE_OKAY:
		break;
	case RANGE_BADPROT:
		return (set_errno(ENOTSUP));
	case RANGE_BADADDR:
	default:
		return (set_errno(ENOMEM));
	}

	error = as_setprot(as, addr, len, uprot);
	if (error)
		return (set_errno(error));
	return (0);
}

#define	MC_CACHE	128			/* internal result buffer */
#define	MC_QUANTUM	(MC_CACHE * PAGESIZE)	/* addresses covered in loop */

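/*
 * mincore(2) system call: report, one byte per page, which pages of the
 * given range are resident. Results are gathered MC_QUANTUM bytes of
 * address space at a time into a local buffer and copied out as we go.
 */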
int
mincore(caddr_t addr, size_t len, char *vecp)
{
	struct as *as = curproc->p_as;
	caddr_t ea;			/* end address of loop */
	size_t rl;			/* inner result length */
	char vec[MC_CACHE];		/* local vector cache */
	int error;
	model_t model;
	long llen;

	model = get_udatamodel();
	/*
	 * Validate form of address parameters.
	 */
	if (model == DATAMODEL_NATIVE) {
		llen = (long)len;
	} else {
		llen = (int32_t)(size32_t)len;
	}
	if (((uintptr_t)addr & PAGEOFFSET) != 0 || llen <= 0)
		return (set_errno(EINVAL));

	if (valid_usr_range(addr, len, 0, as, as->a_userlimit) != RANGE_OKAY)
		return (set_errno(ENOMEM));

	/*
	 * Loop over subranges of the interval [addr : addr + len), gathering
	 * results internally and then copying them out to the caller. The
	 * subrange size is based on MC_CACHE, defined above.
	 */
	for (ea = addr + len; addr < ea; addr += MC_QUANTUM) {
		error = as_incore(as, addr,
		    (size_t)MIN(MC_QUANTUM, ea - addr), vec, &rl);
		if (rl != 0) {
			rl = (rl + PAGESIZE - 1) / PAGESIZE;
			if (copyout(vec, vecp, rl) != 0)
				return (set_errno(EFAULT));
			vecp += rl;
		}
		if (error != 0)
			return (set_errno(ENOMEM));
	}
	return (0);
}