FreeBSD/Linux Kernel Cross Reference
sys/vm/vm_mmap.c
1 /*-
2 * Copyright (c) 1988 University of Utah.
3 * Copyright (c) 1991, 1993
4 * The Regents of the University of California. All rights reserved.
5 *
6 * This code is derived from software contributed to Berkeley by
7 * the Systems Programming Group of the University of Utah Computer
8 * Science Department.
9 *
10 * Redistribution and use in source and binary forms, with or without
11 * modification, are permitted provided that the following conditions
12 * are met:
13 * 1. Redistributions of source code must retain the above copyright
14 * notice, this list of conditions and the following disclaimer.
15 * 2. Redistributions in binary form must reproduce the above copyright
16 * notice, this list of conditions and the following disclaimer in the
17 * documentation and/or other materials provided with the distribution.
18 * 4. Neither the name of the University nor the names of its contributors
19 * may be used to endorse or promote products derived from this software
20 * without specific prior written permission.
21 *
22 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
23 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
24 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
25 * ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
26 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
27 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
28 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
29 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
30 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
31 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
32 * SUCH DAMAGE.
33 *
34 * from: Utah $Hdr: vm_mmap.c 1.6 91/10/21$
35 *
36 * @(#)vm_mmap.c 8.4 (Berkeley) 1/12/94
37 */
38
39 /*
40 * Mapped file (mmap) interface to VM
41 */
42
43 #include <sys/cdefs.h>
44 __FBSDID("$FreeBSD: releng/11.2/sys/vm/vm_mmap.c 331722 2018-03-29 02:50:57Z eadler $");
45
46 #include "opt_compat.h"
47 #include "opt_hwpmc_hooks.h"
48 #include "opt_vm.h"
49
50 #include <sys/param.h>
51 #include <sys/systm.h>
52 #include <sys/capsicum.h>
53 #include <sys/kernel.h>
54 #include <sys/lock.h>
55 #include <sys/mutex.h>
56 #include <sys/sysproto.h>
57 #include <sys/filedesc.h>
58 #include <sys/priv.h>
59 #include <sys/proc.h>
60 #include <sys/procctl.h>
61 #include <sys/racct.h>
62 #include <sys/resource.h>
63 #include <sys/resourcevar.h>
64 #include <sys/rwlock.h>
65 #include <sys/sysctl.h>
66 #include <sys/vnode.h>
67 #include <sys/fcntl.h>
68 #include <sys/file.h>
69 #include <sys/mman.h>
70 #include <sys/mount.h>
71 #include <sys/conf.h>
72 #include <sys/stat.h>
73 #include <sys/syscallsubr.h>
74 #include <sys/sysent.h>
75 #include <sys/vmmeter.h>
76
77 #include <security/audit/audit.h>
78 #include <security/mac/mac_framework.h>
79
80 #include <vm/vm.h>
81 #include <vm/vm_param.h>
82 #include <vm/pmap.h>
83 #include <vm/vm_map.h>
84 #include <vm/vm_object.h>
85 #include <vm/vm_page.h>
86 #include <vm/vm_pager.h>
87 #include <vm/vm_pageout.h>
88 #include <vm/vm_extern.h>
89 #include <vm/vm_page.h>
90 #include <vm/vnode_pager.h>
91
92 #ifdef HWPMC_HOOKS
93 #include <sys/pmckern.h>
94 #endif
95
96 int old_mlock = 0;
97 SYSCTL_INT(_vm, OID_AUTO, old_mlock, CTLFLAG_RWTUN, &old_mlock, 0,
98 "Do not apply RLIMIT_MEMLOCK on mlockall");
99
100 #ifdef MAP_32BIT
101 #define MAP_32BIT_MAX_ADDR ((vm_offset_t)1 << 31)
102 #endif
103
104 #ifndef _SYS_SYSPROTO_H_
105 struct sbrk_args {
106 int incr;
107 };
108 #endif
109
110 int
111 sys_sbrk(struct thread *td, struct sbrk_args *uap)
112 {
113 /* Not yet implemented */
114 return (EOPNOTSUPP);
115 }
116
117 #ifndef _SYS_SYSPROTO_H_
118 struct sstk_args {
119 int incr;
120 };
121 #endif
122
123 int
124 sys_sstk(struct thread *td, struct sstk_args *uap)
125 {
126 /* Not yet implemented */
127 return (EOPNOTSUPP);
128 }
129
130 #if defined(COMPAT_43)
131 #ifndef _SYS_SYSPROTO_H_
132 struct getpagesize_args {
133 int dummy;
134 };
135 #endif
136
137 int
138 ogetpagesize(struct thread *td, struct getpagesize_args *uap)
139 {
140
141 td->td_retval[0] = PAGE_SIZE;
142 return (0);
143 }
144 #endif /* COMPAT_43 */
145
146
147 /*
148 * Memory Map (mmap) system call. Note that the file offset
149 * and address are allowed to be NOT page aligned, though if
150 * the MAP_FIXED flag is set, both must have the same remainder
151 * modulo the PAGE_SIZE (POSIX 1003.1b). If the address is not
152 * page-aligned, the actual mapping starts at trunc_page(addr)
153 * and the return value is adjusted up by the page offset.
154 *
155 * Generally speaking, only character devices which are themselves
156 * memory-based, such as a video framebuffer, can be mmap'd. Otherwise
157 * there would be no cache coherency between a descriptor and a VM mapping
158 * both to the same character device.
159 */
160 #ifndef _SYS_SYSPROTO_H_
161 struct mmap_args {
162 void *addr;
163 size_t len;
164 int prot;
165 int flags;
166 int fd;
167 long pad;
168 off_t pos;
169 };
170 #endif
171
172 int
173 sys_mmap(struct thread *td, struct mmap_args *uap)
174 {
175
176 return (kern_mmap(td, (uintptr_t)uap->addr, uap->len, uap->prot,
177 uap->flags, uap->fd, uap->pos));
178 }
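
/*
 * Editor's illustrative sketch (not part of the original file): a
 * minimal userland caller exercising the offset semantics documented
 * in the comment above sys_mmap().  Without MAP_FIXED the file offset
 * need not be page aligned; the kernel maps from trunc_page(off) and
 * returns an address adjusted up by the page offset, so p[0] below is
 * the byte at exactly 'off'.  Guarded out: this is userland code, not
 * kernel code.
 */
#if 0
#include <sys/types.h>
#include <sys/mman.h>
#include <fcntl.h>
#include <stdint.h>
#include <stdio.h>
#include <unistd.h>

static int
map_unaligned(const char *path, off_t off, size_t len)
{
	char *p;
	int fd;

	fd = open(path, O_RDONLY);
	if (fd == -1)
		return (-1);
	p = mmap(NULL, len, PROT_READ, MAP_SHARED, fd, off);
	close(fd);			/* the mapping survives the close */
	if (p == MAP_FAILED)
		return (-1);
	printf("byte at offset %jd: %d\n", (intmax_t)off, p[0]);
	return (munmap(p, len));
}
#endif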
179
180 int
181 kern_mmap(struct thread *td, uintptr_t addr0, size_t size, int prot, int flags,
182 int fd, off_t pos)
183 {
184 struct vmspace *vms;
185 struct file *fp;
186 vm_offset_t addr;
187 vm_size_t pageoff;
188 vm_prot_t cap_maxprot;
189 int align, error;
190 cap_rights_t rights;
191
192 vms = td->td_proc->p_vmspace;
193 fp = NULL;
194 AUDIT_ARG_FD(fd);
195 addr = addr0;
196
197 /*
198 * Ignore old flags that used to be defined but did not do anything.
199 */
200 flags &= ~(MAP_RESERVED0020 | MAP_RESERVED0040);
201
202 /*
203 * Enforce the constraints.
204 * Mapping of length 0 is only allowed for old binaries.
205 * Anonymous mapping shall specify -1 as the file descriptor and
206 * zero position for new code. Be nice to ancient a.out
207 * binaries and correct pos for anonymous mapping, since old
208 * ld.so sometimes issues anonymous map requests with non-zero
209 * pos.
210 */
211 if (!SV_CURPROC_FLAG(SV_AOUT)) {
212 if ((size == 0 && curproc->p_osrel >= P_OSREL_MAP_ANON) ||
213 ((flags & MAP_ANON) != 0 && (fd != -1 || pos != 0)))
214 return (EINVAL);
215 } else {
216 if ((flags & MAP_ANON) != 0)
217 pos = 0;
218 }
219
220 if (flags & MAP_STACK) {
221 if ((fd != -1) ||
222 ((prot & (PROT_READ | PROT_WRITE)) != (PROT_READ | PROT_WRITE)))
223 return (EINVAL);
224 flags |= MAP_ANON;
225 pos = 0;
226 }
227 if ((flags & ~(MAP_SHARED | MAP_PRIVATE | MAP_FIXED | MAP_HASSEMAPHORE |
228 MAP_STACK | MAP_NOSYNC | MAP_ANON | MAP_EXCL | MAP_NOCORE |
229 MAP_PREFAULT_READ | MAP_GUARD |
230 #ifdef MAP_32BIT
231 MAP_32BIT |
232 #endif
233 MAP_ALIGNMENT_MASK)) != 0)
234 return (EINVAL);
235 if ((flags & (MAP_EXCL | MAP_FIXED)) == MAP_EXCL)
236 return (EINVAL);
237 if ((flags & (MAP_SHARED | MAP_PRIVATE)) == (MAP_SHARED | MAP_PRIVATE))
238 return (EINVAL);
239 if (prot != PROT_NONE &&
240 (prot & ~(PROT_READ | PROT_WRITE | PROT_EXEC)) != 0)
241 return (EINVAL);
242 if ((flags & MAP_GUARD) != 0 && (prot != PROT_NONE || fd != -1 ||
243 pos != 0 || (flags & (MAP_SHARED | MAP_PRIVATE | MAP_PREFAULT |
244 MAP_PREFAULT_READ | MAP_ANON | MAP_STACK)) != 0))
245 return (EINVAL);
246
247 /*
248 * Align the file position to a page boundary,
249 * and save its page offset component.
250 */
251 pageoff = (pos & PAGE_MASK);
252 pos -= pageoff;
253
254 /* Adjust size for rounding (on both ends). */
255 size += pageoff; /* low end... */
256 size = (vm_size_t) round_page(size); /* hi end */
257
258 /* Ensure alignment is at least a page and fits in a pointer. */
259 align = flags & MAP_ALIGNMENT_MASK;
260 if (align != 0 && align != MAP_ALIGNED_SUPER &&
261 (align >> MAP_ALIGNMENT_SHIFT >= sizeof(void *) * NBBY ||
262 align >> MAP_ALIGNMENT_SHIFT < PAGE_SHIFT))
263 return (EINVAL);
264
265 /*
266 * Check for illegal addresses. Watch out for address wrap... Note
267 * that VM_*_ADDRESS are not constants due to casts (argh).
268 */
269 if (flags & MAP_FIXED) {
270 /*
271 * The specified address must have the same remainder
272 * as the file offset taken modulo PAGE_SIZE, so it
273 * should be aligned after adjustment by pageoff.
274 */
275 addr -= pageoff;
276 if (addr & PAGE_MASK)
277 return (EINVAL);
278
279 /* Address range must be all in user VM space. */
280 if (addr < vm_map_min(&vms->vm_map) ||
281 addr + size > vm_map_max(&vms->vm_map))
282 return (EINVAL);
283 if (addr + size < addr)
284 return (EINVAL);
285 #ifdef MAP_32BIT
286 if (flags & MAP_32BIT && addr + size > MAP_32BIT_MAX_ADDR)
287 return (EINVAL);
288 } else if (flags & MAP_32BIT) {
289 /*
290 * For MAP_32BIT, override the hint if it is too high and
291 * do not bother moving the mapping past the heap (since
292 * the heap is usually above 2GB).
293 */
294 if (addr + size > MAP_32BIT_MAX_ADDR)
295 addr = 0;
296 #endif
297 } else {
298 /*
299 * XXX for non-fixed mappings where no hint is provided or
300 * the hint would fall in the potential heap space,
301 * place it after the end of the largest possible heap.
302 *
303 * There should really be a pmap call to determine a reasonable
304 * location.
305 */
306 if (addr == 0 ||
307 (addr >= round_page((vm_offset_t)vms->vm_taddr) &&
308 addr < round_page((vm_offset_t)vms->vm_daddr +
309 lim_max(td, RLIMIT_DATA))))
310 addr = round_page((vm_offset_t)vms->vm_daddr +
311 lim_max(td, RLIMIT_DATA));
312 }
313 if (size == 0) {
314 /*
315 * Return success without mapping anything for old
316 * binaries that request a page-aligned mapping of
317 * length 0. For modern binaries, this function
318 * returns an error earlier.
319 */
320 error = 0;
321 } else if ((flags & MAP_GUARD) != 0) {
322 error = vm_mmap_object(&vms->vm_map, &addr, size, VM_PROT_NONE,
323 VM_PROT_NONE, flags, NULL, pos, FALSE, td);
324 } else if ((flags & MAP_ANON) != 0) {
325 /*
326 * Mapping blank space is trivial.
327 *
328 * This relies on VM_PROT_* matching PROT_*.
329 */
330 error = vm_mmap_object(&vms->vm_map, &addr, size, prot,
331 VM_PROT_ALL, flags, NULL, pos, FALSE, td);
332 } else {
333 /*
334 * Mapping a file: get fp for validation and don't let the
335 * descriptor disappear on us if we block. Check capability
336 * rights, but also return the maximum rights to be combined
337 * with maxprot later.
338 */
339 cap_rights_init(&rights, CAP_MMAP);
340 if (prot & PROT_READ)
341 cap_rights_set(&rights, CAP_MMAP_R);
342 if ((flags & MAP_SHARED) != 0) {
343 if (prot & PROT_WRITE)
344 cap_rights_set(&rights, CAP_MMAP_W);
345 }
346 if (prot & PROT_EXEC)
347 cap_rights_set(&rights, CAP_MMAP_X);
348 error = fget_mmap(td, fd, &rights, &cap_maxprot, &fp);
349 if (error != 0)
350 goto done;
351 if ((flags & (MAP_SHARED | MAP_PRIVATE)) == 0 &&
352 td->td_proc->p_osrel >= P_OSREL_MAP_FSTRICT) {
353 error = EINVAL;
354 goto done;
355 }
356
357 /* This relies on VM_PROT_* matching PROT_*. */
358 error = fo_mmap(fp, &vms->vm_map, &addr, size, prot,
359 cap_maxprot, flags, pos, td);
360 }
361
362 if (error == 0)
363 td->td_retval[0] = (register_t) (addr + pageoff);
364 done:
365 if (fp)
366 fdrop(fp, td);
367
368 return (error);
369 }
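
/*
 * Editor's illustrative sketch (not part of the original file): how a
 * caller produces the MAP_ALIGNMENT_MASK bits that kern_mmap()
 * validates above.  MAP_ALIGNED(n) from <sys/mman.h> encodes a log2
 * alignment request; values below PAGE_SHIFT or too large to fit in a
 * pointer are rejected with EINVAL.  Guarded out: userland code.
 */
#if 0
#include <sys/mman.h>

static void *
map_aligned_1m(size_t len)
{
	/* Request a 2^20-byte (1 MiB) aligned anonymous mapping. */
	return (mmap(NULL, len, PROT_READ | PROT_WRITE,
	    MAP_ANON | MAP_ALIGNED(20), -1, 0));
}
#endif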
370
371 #if defined(COMPAT_FREEBSD6)
372 int
373 freebsd6_mmap(struct thread *td, struct freebsd6_mmap_args *uap)
374 {
375
376 return (kern_mmap(td, (uintptr_t)uap->addr, uap->len, uap->prot,
377 uap->flags, uap->fd, uap->pos));
378 }
379 #endif
380
381 #ifdef COMPAT_43
382 #ifndef _SYS_SYSPROTO_H_
383 struct ommap_args {
384 caddr_t addr;
385 int len;
386 int prot;
387 int flags;
388 int fd;
389 long pos;
390 };
391 #endif
392 int
393 ommap(struct thread *td, struct ommap_args *uap)
394 {
395 static const char cvtbsdprot[8] = {
396 0,
397 PROT_EXEC,
398 PROT_WRITE,
399 PROT_EXEC | PROT_WRITE,
400 PROT_READ,
401 PROT_EXEC | PROT_READ,
402 PROT_WRITE | PROT_READ,
403 PROT_EXEC | PROT_WRITE | PROT_READ,
404 };
405 int flags, prot;
406
407 #define OMAP_ANON 0x0002
408 #define OMAP_COPY 0x0020
409 #define OMAP_SHARED 0x0010
410 #define OMAP_FIXED 0x0100
411
412 prot = cvtbsdprot[uap->prot & 0x7];
413 #ifdef COMPAT_FREEBSD32
414 #if defined(__amd64__)
415 if (i386_read_exec && SV_PROC_FLAG(td->td_proc, SV_ILP32) &&
416 prot != 0)
417 prot |= PROT_EXEC;
418 #endif
419 #endif
420 flags = 0;
421 if (uap->flags & OMAP_ANON)
422 flags |= MAP_ANON;
423 if (uap->flags & OMAP_COPY)
424 flags |= MAP_COPY;
425 if (uap->flags & OMAP_SHARED)
426 flags |= MAP_SHARED;
427 else
428 flags |= MAP_PRIVATE;
429 if (uap->flags & OMAP_FIXED)
430 flags |= MAP_FIXED;
431 return (kern_mmap(td, (uintptr_t)uap->addr, uap->len, prot, flags,
432 uap->fd, uap->pos));
433 }
434 #endif /* COMPAT_43 */
435
436
437 #ifndef _SYS_SYSPROTO_H_
438 struct msync_args {
439 void *addr;
440 size_t len;
441 int flags;
442 };
443 #endif
444 int
445 sys_msync(struct thread *td, struct msync_args *uap)
446 {
447
448 return (kern_msync(td, (uintptr_t)uap->addr, uap->len, uap->flags));
449 }
450
451 int
452 kern_msync(struct thread *td, uintptr_t addr0, size_t size, int flags)
453 {
454 vm_offset_t addr;
455 vm_size_t pageoff;
456 vm_map_t map;
457 int rv;
458
459 addr = addr0;
460 pageoff = (addr & PAGE_MASK);
461 addr -= pageoff;
462 size += pageoff;
463 size = (vm_size_t) round_page(size);
464 if (addr + size < addr)
465 return (EINVAL);
466
467 if ((flags & (MS_ASYNC|MS_INVALIDATE)) == (MS_ASYNC|MS_INVALIDATE))
468 return (EINVAL);
469
470 map = &td->td_proc->p_vmspace->vm_map;
471
472 /*
473 * Clean the pages and interpret the return value.
474 */
475 rv = vm_map_sync(map, addr, addr + size, (flags & MS_ASYNC) == 0,
476 (flags & MS_INVALIDATE) != 0);
477 switch (rv) {
478 case KERN_SUCCESS:
479 return (0);
480 case KERN_INVALID_ADDRESS:
481 return (ENOMEM);
482 case KERN_INVALID_ARGUMENT:
483 return (EBUSY);
484 case KERN_FAILURE:
485 return (EIO);
486 default:
487 return (EINVAL);
488 }
489 }
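
/*
 * Editor's illustrative sketch (not part of the original file): the
 * userland view of the error translation above.  An unmapped range
 * comes back as ENOMEM (KERN_INVALID_ADDRESS) and MS_INVALIDATE on a
 * range containing wired pages as EBUSY (KERN_INVALID_ARGUMENT).
 * Guarded out: userland code.
 */
#if 0
#include <sys/mman.h>
#include <errno.h>
#include <stdio.h>

static void
sync_range(void *p, size_t len)
{
	if (msync(p, len, MS_SYNC | MS_INVALIDATE) == -1) {
		if (errno == EBUSY)
			printf("range contains wired pages\n");
		else if (errno == ENOMEM)
			printf("range is not fully mapped\n");
	}
}
#endif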
490
491 #ifndef _SYS_SYSPROTO_H_
492 struct munmap_args {
493 void *addr;
494 size_t len;
495 };
496 #endif
497 int
498 sys_munmap(struct thread *td, struct munmap_args *uap)
499 {
500
501 return (kern_munmap(td, (uintptr_t)uap->addr, uap->len));
502 }
503
504 int
505 kern_munmap(struct thread *td, uintptr_t addr0, size_t size)
506 {
507 #ifdef HWPMC_HOOKS
508 struct pmckern_map_out pkm;
509 vm_map_entry_t entry;
510 bool pmc_handled;
511 #endif
512 vm_offset_t addr;
513 vm_size_t pageoff;
514 vm_map_t map;
515
516 if (size == 0)
517 return (EINVAL);
518
519 addr = addr0;
520 pageoff = (addr & PAGE_MASK);
521 addr -= pageoff;
522 size += pageoff;
523 size = (vm_size_t) round_page(size);
524 if (addr + size < addr)
525 return (EINVAL);
526
527 /*
528 * Check for illegal addresses. Watch out for address wrap...
529 */
530 map = &td->td_proc->p_vmspace->vm_map;
531 if (addr < vm_map_min(map) || addr + size > vm_map_max(map))
532 return (EINVAL);
533 vm_map_lock(map);
534 #ifdef HWPMC_HOOKS
535 pmc_handled = false;
536 if (PMC_HOOK_INSTALLED(PMC_FN_MUNMAP)) {
537 pmc_handled = true;
538 /*
539 * Inform hwpmc if the address range being unmapped contains
540 * an executable region.
541 */
542 pkm.pm_address = (uintptr_t) NULL;
543 if (vm_map_lookup_entry(map, addr, &entry)) {
544 for (; entry->start < addr + size;
545 entry = entry->next) {
546 if (vm_map_check_protection(map, entry->start,
547 entry->end, VM_PROT_EXECUTE) == TRUE) {
548 pkm.pm_address = (uintptr_t) addr;
549 pkm.pm_size = (size_t) size;
550 break;
551 }
552 }
553 }
554 }
555 #endif
556 vm_map_delete(map, addr, addr + size);
557
558 #ifdef HWPMC_HOOKS
559 if (__predict_false(pmc_handled)) {
560 /* downgrade the lock to prevent a LOR with the pmc-sx lock */
561 vm_map_lock_downgrade(map);
562 if (pkm.pm_address != (uintptr_t) NULL)
563 PMC_CALL_HOOK(td, PMC_FN_MUNMAP, (void *) &pkm);
564 vm_map_unlock_read(map);
565 } else
566 #endif
567 vm_map_unlock(map);
568
569 /* vm_map_delete returns nothing but KERN_SUCCESS anyway */
570 return (0);
571 }
572
573 #ifndef _SYS_SYSPROTO_H_
574 struct mprotect_args {
575 const void *addr;
576 size_t len;
577 int prot;
578 };
579 #endif
580 int
581 sys_mprotect(struct thread *td, struct mprotect_args *uap)
582 {
583
584 return (kern_mprotect(td, (uintptr_t)uap->addr, uap->len, uap->prot));
585 }
586
587 int
588 kern_mprotect(struct thread *td, uintptr_t addr0, size_t size, int prot)
589 {
590 vm_offset_t addr;
591 vm_size_t pageoff;
592
593 addr = addr0;
594 prot = (prot & VM_PROT_ALL);
595 pageoff = (addr & PAGE_MASK);
596 addr -= pageoff;
597 size += pageoff;
598 size = (vm_size_t) round_page(size);
599 if (addr + size < addr)
600 return (EINVAL);
601
602 switch (vm_map_protect(&td->td_proc->p_vmspace->vm_map, addr,
603 addr + size, prot, FALSE)) {
604 case KERN_SUCCESS:
605 return (0);
606 case KERN_PROTECTION_FAILURE:
607 return (EACCES);
608 case KERN_RESOURCE_SHORTAGE:
609 return (ENOMEM);
610 }
611 return (EINVAL);
612 }
613
614 #ifndef _SYS_SYSPROTO_H_
615 struct minherit_args {
616 void *addr;
617 size_t len;
618 int inherit;
619 };
620 #endif
621 int
622 sys_minherit(struct thread *td, struct minherit_args *uap)
623 {
624 vm_offset_t addr;
625 vm_size_t size, pageoff;
626 vm_inherit_t inherit;
627
628 addr = (vm_offset_t)uap->addr;
629 size = uap->len;
630 inherit = uap->inherit;
631
632 pageoff = (addr & PAGE_MASK);
633 addr -= pageoff;
634 size += pageoff;
635 size = (vm_size_t) round_page(size);
636 if (addr + size < addr)
637 return (EINVAL);
638
639 switch (vm_map_inherit(&td->td_proc->p_vmspace->vm_map, addr,
640 addr + size, inherit)) {
641 case KERN_SUCCESS:
642 return (0);
643 case KERN_PROTECTION_FAILURE:
644 return (EACCES);
645 }
646 return (EINVAL);
647 }
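
/*
 * Editor's illustrative sketch (not part of the original file): a
 * typical use of the inheritance control implemented above, dropping a
 * region from the address space of future children.  Guarded out:
 * userland code.
 */
#if 0
#include <sys/mman.h>

static int
hide_from_children(void *secret, size_t len)
{
	/* Processes created by fork() will not inherit this mapping. */
	return (minherit(secret, len, INHERIT_NONE));
}
#endif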
648
649 #ifndef _SYS_SYSPROTO_H_
650 struct madvise_args {
651 void *addr;
652 size_t len;
653 int behav;
654 };
655 #endif
656
657 int
658 sys_madvise(struct thread *td, struct madvise_args *uap)
659 {
660
661 return (kern_madvise(td, (uintptr_t)uap->addr, uap->len, uap->behav));
662 }
663
664 int
665 kern_madvise(struct thread *td, uintptr_t addr0, size_t len, int behav)
666 {
667 vm_map_t map;
668 vm_offset_t addr, end, start;
669 int flags;
670
671 /*
672 * Check for our special case, advising the swap pager we are
673 * "immortal."
674 */
675 if (behav == MADV_PROTECT) {
676 flags = PPROT_SET;
677 return (kern_procctl(td, P_PID, td->td_proc->p_pid,
678 PROC_SPROTECT, &flags));
679 }
680
681 /*
682 * Check for illegal behavior
683 */
684 if (behav < 0 || behav > MADV_CORE)
685 return (EINVAL);
686 /*
687 * Check for illegal addresses. Watch out for address wrap... Note
688 * that VM_*_ADDRESS are not constants due to casts (argh).
689 */
690 map = &td->td_proc->p_vmspace->vm_map;
691 addr = addr0;
692 if (addr < vm_map_min(map) || addr + len > vm_map_max(map))
693 return (EINVAL);
694 if ((addr + len) < addr)
695 return (EINVAL);
696
697 /*
698 * Since this routine is only advisory, we default to conservative
699 * behavior.
700 */
701 start = trunc_page(addr);
702 end = round_page(addr + len);
703
704 if (vm_map_madvise(map, start, end, behav))
705 return (EINVAL);
706 return (0);
707 }
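
/*
 * Editor's illustrative sketch (not part of the original file): the
 * MADV_PROTECT special case above never looks at the address range; it
 * marks the whole process as protected from kills when the system runs
 * out of swap, the same effect as procctl(PROC_SPROTECT).  Both forms
 * require privilege.  Guarded out: userland code.
 */
#if 0
#include <sys/procctl.h>
#include <sys/mman.h>
#include <unistd.h>

static int
make_immortal(void)
{
	int flags = PPROT_SET;

	/* Either call reaches the same kernel path shown above. */
	if (procctl(P_PID, getpid(), PROC_SPROTECT, &flags) == 0)
		return (0);
	return (madvise(NULL, 0, MADV_PROTECT));
}
#endif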
708
709 #ifndef _SYS_SYSPROTO_H_
710 struct mincore_args {
711 const void *addr;
712 size_t len;
713 char *vec;
714 };
715 #endif
716
717 int
718 sys_mincore(struct thread *td, struct mincore_args *uap)
719 {
720
721 return (kern_mincore(td, (uintptr_t)uap->addr, uap->len, uap->vec));
722 }
723
724 int
725 kern_mincore(struct thread *td, uintptr_t addr0, size_t len, char *vec)
726 {
727 vm_offset_t addr, first_addr;
728 vm_offset_t end, cend;
729 pmap_t pmap;
730 vm_map_t map;
731 int error = 0;
732 int vecindex, lastvecindex;
733 vm_map_entry_t current;
734 vm_map_entry_t entry;
735 vm_object_t object;
736 vm_paddr_t locked_pa;
737 vm_page_t m;
738 vm_pindex_t pindex;
739 int mincoreinfo;
740 unsigned int timestamp;
741 boolean_t locked;
742
743 /*
744 * Make sure that the addresses presented are valid for user
745 * mode.
746 */
747 first_addr = addr = trunc_page(addr0);
748 end = addr + (vm_size_t)round_page(len);
749 map = &td->td_proc->p_vmspace->vm_map;
750 if (end > vm_map_max(map) || end < addr)
751 return (ENOMEM);
752
753 pmap = vmspace_pmap(td->td_proc->p_vmspace);
754
755 vm_map_lock_read(map);
756 RestartScan:
757 timestamp = map->timestamp;
758
759 if (!vm_map_lookup_entry(map, addr, &entry)) {
760 vm_map_unlock_read(map);
761 return (ENOMEM);
762 }
763
764 /*
765 * Do this on a map entry basis so that if the pages are not
766 * in the current process's address space, we can easily look
767 * up the pages elsewhere.
768 */
769 lastvecindex = -1;
770 for (current = entry; current->start < end; current = current->next) {
771
772 /*
773 * check for contiguity
774 */
775 if (current->end < end && current->next->start > current->end) {
776 vm_map_unlock_read(map);
777 return (ENOMEM);
778 }
779
780 /*
781 * ignore submaps (for now) or null objects
782 */
783 if ((current->eflags & MAP_ENTRY_IS_SUB_MAP) ||
784 current->object.vm_object == NULL)
785 continue;
786
787 /*
788 * limit this scan to the current map entry and the
789 * limits for the mincore call
790 */
791 if (addr < current->start)
792 addr = current->start;
793 cend = current->end;
794 if (cend > end)
795 cend = end;
796
797 /*
798 * scan this entry one page at a time
799 */
800 while (addr < cend) {
801 /*
802 * Check the pmap first; it is likely faster and it can
803 * also tell us whether we are the one referencing or
804 * modifying the page.
805 */
806 object = NULL;
807 locked_pa = 0;
808 retry:
809 m = NULL;
810 mincoreinfo = pmap_mincore(pmap, addr, &locked_pa);
811 if (locked_pa != 0) {
812 /*
813 * The page is mapped by this process but not
814 * both accessed and modified. It is also
815 * managed. Acquire the object lock so that
816 * other mappings might be examined.
817 */
818 m = PHYS_TO_VM_PAGE(locked_pa);
819 if (m->object != object) {
820 if (object != NULL)
821 VM_OBJECT_WUNLOCK(object);
822 object = m->object;
823 locked = VM_OBJECT_TRYWLOCK(object);
824 vm_page_unlock(m);
825 if (!locked) {
826 VM_OBJECT_WLOCK(object);
827 vm_page_lock(m);
828 goto retry;
829 }
830 } else
831 vm_page_unlock(m);
832 KASSERT(m->valid == VM_PAGE_BITS_ALL,
833 ("mincore: page %p is mapped but invalid",
834 m));
835 } else if (mincoreinfo == 0) {
836 /*
837 * The page is not mapped by this process. If
838 * the object implements managed pages, then
839 * determine if the page is resident so that
840 * the mappings might be examined.
841 */
842 if (current->object.vm_object != object) {
843 if (object != NULL)
844 VM_OBJECT_WUNLOCK(object);
845 object = current->object.vm_object;
846 VM_OBJECT_WLOCK(object);
847 }
848 if (object->type == OBJT_DEFAULT ||
849 object->type == OBJT_SWAP ||
850 object->type == OBJT_VNODE) {
851 pindex = OFF_TO_IDX(current->offset +
852 (addr - current->start));
853 m = vm_page_lookup(object, pindex);
854 if (m != NULL && m->valid == 0)
855 m = NULL;
856 if (m != NULL)
857 mincoreinfo = MINCORE_INCORE;
858 }
859 }
860 if (m != NULL) {
861 /* Examine other mappings to the page. */
862 if (m->dirty == 0 && pmap_is_modified(m))
863 vm_page_dirty(m);
864 if (m->dirty != 0)
865 mincoreinfo |= MINCORE_MODIFIED_OTHER;
866 /*
867 * The first test for PGA_REFERENCED is an
868 * optimization. The second test is
869 * required because a concurrent pmap
870 * operation could clear the last reference
871 * and set PGA_REFERENCED before the call to
872 * pmap_is_referenced().
873 */
874 if ((m->aflags & PGA_REFERENCED) != 0 ||
875 pmap_is_referenced(m) ||
876 (m->aflags & PGA_REFERENCED) != 0)
877 mincoreinfo |= MINCORE_REFERENCED_OTHER;
878 }
879 if (object != NULL)
880 VM_OBJECT_WUNLOCK(object);
881
882 /*
883 * subyte may page fault. In case it needs to modify
884 * the map, we release the lock.
885 */
886 vm_map_unlock_read(map);
887
888 /*
889 * calculate index into user supplied byte vector
890 */
891 vecindex = atop(addr - first_addr);
892
893 /*
894 * If we have skipped map entries, we need to make sure that
895 * the byte vector is zeroed for those skipped entries.
896 */
897 while ((lastvecindex + 1) < vecindex) {
898 ++lastvecindex;
899 error = subyte(vec + lastvecindex, 0);
900 if (error) {
901 error = EFAULT;
902 goto done2;
903 }
904 }
905
906 /*
907 * Pass the page information to the user
908 */
909 error = subyte(vec + vecindex, mincoreinfo);
910 if (error) {
911 error = EFAULT;
912 goto done2;
913 }
914
915 /*
916 * If the map has changed due to the subyte, the previous
917 * output may be invalid.
918 */
919 vm_map_lock_read(map);
920 if (timestamp != map->timestamp)
921 goto RestartScan;
922
923 lastvecindex = vecindex;
924 addr += PAGE_SIZE;
925 }
926 }
927
928 /*
929 * subyte may page fault. In case it needs to modify
930 * the map, we release the lock.
931 */
932 vm_map_unlock_read(map);
933
934 /*
935 * Zero the last entries in the byte vector.
936 */
937 vecindex = atop(end - first_addr);
938 while ((lastvecindex + 1) < vecindex) {
939 ++lastvecindex;
940 error = subyte(vec + lastvecindex, 0);
941 if (error) {
942 error = EFAULT;
943 goto done2;
944 }
945 }
946
947 /*
948 * If the map has changed due to the subyte, the previous
949 * output may be invalid.
950 */
951 vm_map_lock_read(map);
952 if (timestamp != map->timestamp)
953 goto RestartScan;
954 vm_map_unlock_read(map);
955 done2:
956 return (error);
957 }
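
/*
 * Editor's illustrative sketch (not part of the original file): reading
 * the per-page status vector that the scan above fills in through
 * subyte().  Guarded out: userland code.
 */
#if 0
#include <sys/mman.h>
#include <stdio.h>
#include <stdlib.h>
#include <unistd.h>

static void
report_residency(void *base, size_t len)
{
	size_t i, npages, pagesz;
	char *vec;

	pagesz = (size_t)getpagesize();
	npages = (len + pagesz - 1) / pagesz;
	vec = malloc(npages);
	if (vec != NULL && mincore(base, len, vec) == 0) {
		for (i = 0; i < npages; i++)
			printf("page %zu: %s\n", i, (vec[i] &
			    MINCORE_INCORE) ? "resident" : "not resident");
	}
	free(vec);
}
#endif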
958
959 #ifndef _SYS_SYSPROTO_H_
960 struct mlock_args {
961 const void *addr;
962 size_t len;
963 };
964 #endif
965 int
966 sys_mlock(struct thread *td, struct mlock_args *uap)
967 {
968
969 return (kern_mlock(td->td_proc, td->td_ucred,
970 __DECONST(uintptr_t, uap->addr), uap->len));
971 }
972
973 int
974 kern_mlock(struct proc *proc, struct ucred *cred, uintptr_t addr0, size_t len)
975 {
976 vm_offset_t addr, end, last, start;
977 vm_size_t npages, size;
978 vm_map_t map;
979 unsigned long nsize;
980 int error;
981
982 error = priv_check_cred(cred, PRIV_VM_MLOCK, 0);
983 if (error)
984 return (error);
985 addr = addr0;
986 size = len;
987 last = addr + size;
988 start = trunc_page(addr);
989 end = round_page(last);
990 if (last < addr || end < addr)
991 return (EINVAL);
992 npages = atop(end - start);
993 if (npages > vm_page_max_wired)
994 return (ENOMEM);
995 map = &proc->p_vmspace->vm_map;
996 PROC_LOCK(proc);
997 nsize = ptoa(npages + pmap_wired_count(map->pmap));
998 if (nsize > lim_cur_proc(proc, RLIMIT_MEMLOCK)) {
999 PROC_UNLOCK(proc);
1000 return (ENOMEM);
1001 }
1002 PROC_UNLOCK(proc);
1003 if (npages + vm_cnt.v_wire_count > vm_page_max_wired)
1004 return (EAGAIN);
1005 #ifdef RACCT
1006 if (racct_enable) {
1007 PROC_LOCK(proc);
1008 error = racct_set(proc, RACCT_MEMLOCK, nsize);
1009 PROC_UNLOCK(proc);
1010 if (error != 0)
1011 return (ENOMEM);
1012 }
1013 #endif
1014 error = vm_map_wire(map, start, end,
1015 VM_MAP_WIRE_USER | VM_MAP_WIRE_NOHOLES);
1016 #ifdef RACCT
1017 if (racct_enable && error != KERN_SUCCESS) {
1018 PROC_LOCK(proc);
1019 racct_set(proc, RACCT_MEMLOCK,
1020 ptoa(pmap_wired_count(map->pmap)));
1021 PROC_UNLOCK(proc);
1022 }
1023 #endif
1024 return (error == KERN_SUCCESS ? 0 : ENOMEM);
1025 }
1026
1027 #ifndef _SYS_SYSPROTO_H_
1028 struct mlockall_args {
1029 int how;
1030 };
1031 #endif
1032
1033 int
1034 sys_mlockall(struct thread *td, struct mlockall_args *uap)
1035 {
1036 vm_map_t map;
1037 int error;
1038
1039 map = &td->td_proc->p_vmspace->vm_map;
1040 error = priv_check(td, PRIV_VM_MLOCK);
1041 if (error)
1042 return (error);
1043
1044 if ((uap->how == 0) || ((uap->how & ~(MCL_CURRENT|MCL_FUTURE)) != 0))
1045 return (EINVAL);
1046
1047 /*
1048 * If wiring all pages in the process would cause it to exceed
1049 * a hard resource limit, return ENOMEM.
1050 */
1051 if (!old_mlock && uap->how & MCL_CURRENT) {
1052 PROC_LOCK(td->td_proc);
1053 if (map->size > lim_cur(td, RLIMIT_MEMLOCK)) {
1054 PROC_UNLOCK(td->td_proc);
1055 return (ENOMEM);
1056 }
1057 PROC_UNLOCK(td->td_proc);
1058 }
1059 #ifdef RACCT
1060 if (racct_enable) {
1061 PROC_LOCK(td->td_proc);
1062 error = racct_set(td->td_proc, RACCT_MEMLOCK, map->size);
1063 PROC_UNLOCK(td->td_proc);
1064 if (error != 0)
1065 return (ENOMEM);
1066 }
1067 #endif
1068
1069 if (uap->how & MCL_FUTURE) {
1070 vm_map_lock(map);
1071 vm_map_modflags(map, MAP_WIREFUTURE, 0);
1072 vm_map_unlock(map);
1073 error = 0;
1074 }
1075
1076 if (uap->how & MCL_CURRENT) {
1077 /*
1078 * P1003.1-2001 mandates that all currently mapped pages
1079 * will be memory resident and locked (wired) upon return
1080 * from mlockall(). vm_map_wire() will wire pages, by
1081 * calling vm_fault_wire() for each page in the region.
1082 */
1083 error = vm_map_wire(map, vm_map_min(map), vm_map_max(map),
1084 VM_MAP_WIRE_USER|VM_MAP_WIRE_HOLESOK);
1085 error = (error == KERN_SUCCESS ? 0 : EAGAIN);
1086 }
1087 #ifdef RACCT
1088 if (racct_enable && error != KERN_SUCCESS) {
1089 PROC_LOCK(td->td_proc);
1090 racct_set(td->td_proc, RACCT_MEMLOCK,
1091 ptoa(pmap_wired_count(map->pmap)));
1092 PROC_UNLOCK(td->td_proc);
1093 }
1094 #endif
1095
1096 return (error);
1097 }
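
/*
 * Editor's illustrative sketch (not part of the original file): the
 * usual caller pattern for the two flag bits handled above.  MCL_FUTURE
 * only sets MAP_WIREFUTURE on the map; MCL_CURRENT performs the actual
 * wiring and is where EAGAIN can be returned.  Guarded out: userland
 * code.
 */
#if 0
#include <sys/mman.h>

static int
pin_all_memory(void)
{
	/* Wire everything mapped now and everything mapped later. */
	return (mlockall(MCL_CURRENT | MCL_FUTURE));
}
#endif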
1098
1099 #ifndef _SYS_SYSPROTO_H_
1100 struct munlockall_args {
1101 register_t dummy;
1102 };
1103 #endif
1104
1105 int
1106 sys_munlockall(struct thread *td, struct munlockall_args *uap)
1107 {
1108 vm_map_t map;
1109 int error;
1110
1111 map = &td->td_proc->p_vmspace->vm_map;
1112 error = priv_check(td, PRIV_VM_MUNLOCK);
1113 if (error)
1114 return (error);
1115
1116 /* Clear the MAP_WIREFUTURE flag from this vm_map. */
1117 vm_map_lock(map);
1118 vm_map_modflags(map, 0, MAP_WIREFUTURE);
1119 vm_map_unlock(map);
1120
1121 /* Forcibly unwire all pages. */
1122 error = vm_map_unwire(map, vm_map_min(map), vm_map_max(map),
1123 VM_MAP_WIRE_USER|VM_MAP_WIRE_HOLESOK);
1124 #ifdef RACCT
1125 if (racct_enable && error == KERN_SUCCESS) {
1126 PROC_LOCK(td->td_proc);
1127 racct_set(td->td_proc, RACCT_MEMLOCK, 0);
1128 PROC_UNLOCK(td->td_proc);
1129 }
1130 #endif
1131
1132 return (error);
1133 }
1134
1135 #ifndef _SYS_SYSPROTO_H_
1136 struct munlock_args {
1137 const void *addr;
1138 size_t len;
1139 };
1140 #endif
1141 int
1142 sys_munlock(struct thread *td, struct munlock_args *uap)
1143 {
1144
1145 return (kern_munlock(td, (uintptr_t)uap->addr, uap->len));
1146 }
1147
1148 int
1149 kern_munlock(struct thread *td, uintptr_t addr0, size_t size)
1150 {
1151 vm_offset_t addr, end, last, start;
1152 #ifdef RACCT
1153 vm_map_t map;
1154 #endif
1155 int error;
1156
1157 error = priv_check(td, PRIV_VM_MUNLOCK);
1158 if (error)
1159 return (error);
1160 addr = addr0;
1161 last = addr + size;
1162 start = trunc_page(addr);
1163 end = round_page(last);
1164 if (last < addr || end < addr)
1165 return (EINVAL);
1166 error = vm_map_unwire(&td->td_proc->p_vmspace->vm_map, start, end,
1167 VM_MAP_WIRE_USER | VM_MAP_WIRE_NOHOLES);
1168 #ifdef RACCT
1169 if (racct_enable && error == KERN_SUCCESS) {
1170 PROC_LOCK(td->td_proc);
1171 map = &td->td_proc->p_vmspace->vm_map;
1172 racct_set(td->td_proc, RACCT_MEMLOCK,
1173 ptoa(pmap_wired_count(map->pmap)));
1174 PROC_UNLOCK(td->td_proc);
1175 }
1176 #endif
1177 return (error == KERN_SUCCESS ? 0 : ENOMEM);
1178 }
1179
1180 /*
1181 * vm_mmap_vnode()
1182 *
1183 * Helper function for vm_mmap. Perform sanity checks specific to mmap
1184 * operations on vnodes.
1185 */
1186 int
1187 vm_mmap_vnode(struct thread *td, vm_size_t objsize,
1188 vm_prot_t prot, vm_prot_t *maxprotp, int *flagsp,
1189 struct vnode *vp, vm_ooffset_t *foffp, vm_object_t *objp,
1190 boolean_t *writecounted)
1191 {
1192 struct vattr va;
1193 vm_object_t obj;
1194 vm_ooffset_t foff;
1195 struct ucred *cred;
1196 int error, flags, locktype;
1197
1198 cred = td->td_ucred;
1199 if ((*maxprotp & VM_PROT_WRITE) && (*flagsp & MAP_SHARED))
1200 locktype = LK_EXCLUSIVE;
1201 else
1202 locktype = LK_SHARED;
1203 if ((error = vget(vp, locktype, td)) != 0)
1204 return (error);
1205 AUDIT_ARG_VNODE1(vp);
1206 foff = *foffp;
1207 flags = *flagsp;
1208 obj = vp->v_object;
1209 if (vp->v_type == VREG) {
1210 /*
1211 * Get the proper underlying object
1212 */
1213 if (obj == NULL) {
1214 error = EINVAL;
1215 goto done;
1216 }
1217 if (obj->type == OBJT_VNODE && obj->handle != vp) {
1218 vput(vp);
1219 vp = (struct vnode *)obj->handle;
1220 /*
1221 * Bypass filesystems obey the mpsafety of the
1222 * underlying fs. Tmpfs never bypasses.
1223 */
1224 error = vget(vp, locktype, td);
1225 if (error != 0)
1226 return (error);
1227 }
1228 if (locktype == LK_EXCLUSIVE) {
1229 *writecounted = TRUE;
1230 vnode_pager_update_writecount(obj, 0, objsize);
1231 }
1232 } else {
1233 error = EINVAL;
1234 goto done;
1235 }
1236 if ((error = VOP_GETATTR(vp, &va, cred)))
1237 goto done;
1238 #ifdef MAC
1239 /* This relies on VM_PROT_* matching PROT_*. */
1240 error = mac_vnode_check_mmap(cred, vp, (int)prot, flags);
1241 if (error != 0)
1242 goto done;
1243 #endif
1244 if ((flags & MAP_SHARED) != 0) {
1245 if ((va.va_flags & (SF_SNAPSHOT|IMMUTABLE|APPEND)) != 0) {
1246 if (prot & VM_PROT_WRITE) {
1247 error = EPERM;
1248 goto done;
1249 }
1250 *maxprotp &= ~VM_PROT_WRITE;
1251 }
1252 }
1253 /*
1254 * If it is a regular file without any references,
1255 * we do not need to sync it.
1256 * Adjust the object size to be the size of the actual file.
1257 */
1258 objsize = round_page(va.va_size);
1259 if (va.va_nlink == 0)
1260 flags |= MAP_NOSYNC;
1261 if (obj->type == OBJT_VNODE) {
1262 obj = vm_pager_allocate(OBJT_VNODE, vp, objsize, prot, foff,
1263 cred);
1264 if (obj == NULL) {
1265 error = ENOMEM;
1266 goto done;
1267 }
1268 } else {
1269 KASSERT(obj->type == OBJT_DEFAULT || obj->type == OBJT_SWAP,
1270 ("wrong object type"));
1271 VM_OBJECT_WLOCK(obj);
1272 vm_object_reference_locked(obj);
1273 #if VM_NRESERVLEVEL > 0
1274 vm_object_color(obj, 0);
1275 #endif
1276 VM_OBJECT_WUNLOCK(obj);
1277 }
1278 *objp = obj;
1279 *flagsp = flags;
1280
1281 vfs_mark_atime(vp, cred);
1282
1283 done:
1284 if (error != 0 && *writecounted) {
1285 *writecounted = FALSE;
1286 vnode_pager_update_writecount(obj, objsize, 0);
1287 }
1288 vput(vp);
1289 return (error);
1290 }
1291
1292 /*
1293 * vm_mmap_cdev()
1294 *
1295 * Helper function for vm_mmap. Perform sanity checks specific to mmap
1296 * operations on cdevs.
1297 */
1298 int
1299 vm_mmap_cdev(struct thread *td, vm_size_t objsize, vm_prot_t prot,
1300 vm_prot_t *maxprotp, int *flagsp, struct cdev *cdev, struct cdevsw *dsw,
1301 vm_ooffset_t *foff, vm_object_t *objp)
1302 {
1303 vm_object_t obj;
1304 int error, flags;
1305
1306 flags = *flagsp;
1307
1308 if (dsw->d_flags & D_MMAP_ANON) {
1309 *objp = NULL;
1310 *foff = 0;
1311 *maxprotp = VM_PROT_ALL;
1312 *flagsp |= MAP_ANON;
1313 return (0);
1314 }
1315 /*
1316 * cdevs do not provide private mappings of any kind.
1317 */
1318 if ((*maxprotp & VM_PROT_WRITE) == 0 &&
1319 (prot & VM_PROT_WRITE) != 0)
1320 return (EACCES);
1321 if (flags & (MAP_PRIVATE|MAP_COPY))
1322 return (EINVAL);
1323 /*
1324 * Force device mappings to be shared.
1325 */
1326 flags |= MAP_SHARED;
1327 #ifdef MAC_XXX
1328 error = mac_cdev_check_mmap(td->td_ucred, cdev, (int)prot);
1329 if (error != 0)
1330 return (error);
1331 #endif
1332 /*
1333 * First, try d_mmap_single(). If that is not implemented
1334 * (returns ENODEV), fall back to using the device pager.
1335 * Note that d_mmap_single() must return a reference to the
1336 * object (it needs to bump the reference count of the object
1337 * it returns somehow).
1338 *
1339 * XXX assumes VM_PROT_* == PROT_*
1340 */
1341 error = dsw->d_mmap_single(cdev, foff, objsize, objp, (int)prot);
1342 if (error != ENODEV)
1343 return (error);
1344 obj = vm_pager_allocate(OBJT_DEVICE, cdev, objsize, prot, *foff,
1345 td->td_ucred);
1346 if (obj == NULL)
1347 return (EINVAL);
1348 *objp = obj;
1349 *flagsp = flags;
1350 return (0);
1351 }
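
/*
 * Editor's illustrative sketch (not part of the original file): the
 * d_mmap_single() contract described above, seen from a hypothetical
 * driver.  Returning ENODEV makes vm_mmap_cdev() fall back to the
 * device pager; returning 0 must hand back a referenced VM object.
 * The softc layout here is invented for illustration.
 */
#if 0
static int
example_mmap_single(struct cdev *cdev, vm_ooffset_t *offset,
    vm_size_t size, vm_object_t *object, int nprot)
{
	struct example_softc *sc = cdev->si_drv1;	/* hypothetical */

	if (*offset + size > sc->mem_size)
		return (EINVAL);
	vm_object_reference(sc->mem_obj);	/* caller expects a ref */
	*object = sc->mem_obj;
	return (0);
}
#endif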
1352
1353 /*
1354 * vm_mmap()
1355 *
1356 * Internal version of mmap used by exec, sys5 shared memory, and
1357 * various device drivers. Handle is either a vnode pointer, a
1358 * character device, or NULL for MAP_ANON.
1359 */
1360 int
1361 vm_mmap(vm_map_t map, vm_offset_t *addr, vm_size_t size, vm_prot_t prot,
1362 vm_prot_t maxprot, int flags,
1363 objtype_t handle_type, void *handle,
1364 vm_ooffset_t foff)
1365 {
1366 vm_object_t object;
1367 struct thread *td = curthread;
1368 int error;
1369 boolean_t writecounted;
1370
1371 if (size == 0)
1372 return (EINVAL);
1373
1374 size = round_page(size);
1375 object = NULL;
1376 writecounted = FALSE;
1377
1378 /*
1379 * Lookup/allocate object.
1380 */
1381 switch (handle_type) {
1382 case OBJT_DEVICE: {
1383 struct cdevsw *dsw;
1384 struct cdev *cdev;
1385 int ref;
1386
1387 cdev = handle;
1388 dsw = dev_refthread(cdev, &ref);
1389 if (dsw == NULL)
1390 return (ENXIO);
1391 error = vm_mmap_cdev(td, size, prot, &maxprot, &flags, cdev,
1392 dsw, &foff, &object);
1393 dev_relthread(cdev, ref);
1394 break;
1395 }
1396 case OBJT_VNODE:
1397 error = vm_mmap_vnode(td, size, prot, &maxprot, &flags,
1398 handle, &foff, &object, &writecounted);
1399 break;
1400 case OBJT_DEFAULT:
1401 if (handle == NULL) {
1402 error = 0;
1403 break;
1404 }
1405 /* FALLTHROUGH */
1406 default:
1407 error = EINVAL;
1408 break;
1409 }
1410 if (error)
1411 return (error);
1412
1413 error = vm_mmap_object(map, addr, size, prot, maxprot, flags, object,
1414 foff, writecounted, td);
1415 if (error != 0 && object != NULL) {
1416 /*
1417 * If this mapping was accounted for in the vnode's
1418 * writecount, then undo that now.
1419 */
1420 if (writecounted)
1421 vnode_pager_release_writecount(object, 0, size);
1422 vm_object_deallocate(object);
1423 }
1424 return (error);
1425 }
1426
1427 /*
1428 * Internal version of mmap that maps a specific VM object into a
1429 * map. Called by mmap for MAP_ANON, vm_mmap, shm_mmap, and vn_mmap.
1430 */
1431 int
1432 vm_mmap_object(vm_map_t map, vm_offset_t *addr, vm_size_t size, vm_prot_t prot,
1433 vm_prot_t maxprot, int flags, vm_object_t object, vm_ooffset_t foff,
1434 boolean_t writecounted, struct thread *td)
1435 {
1436 boolean_t curmap, fitit;
1437 vm_offset_t max_addr;
1438 int docow, error, findspace, rv;
1439
1440 curmap = map == &td->td_proc->p_vmspace->vm_map;
1441 if (curmap) {
1442 PROC_LOCK(td->td_proc);
1443 if (map->size + size > lim_cur_proc(td->td_proc, RLIMIT_VMEM)) {
1444 PROC_UNLOCK(td->td_proc);
1445 return (ENOMEM);
1446 }
1447 if (racct_set(td->td_proc, RACCT_VMEM, map->size + size)) {
1448 PROC_UNLOCK(td->td_proc);
1449 return (ENOMEM);
1450 }
1451 if (!old_mlock && map->flags & MAP_WIREFUTURE) {
1452 if (ptoa(pmap_wired_count(map->pmap)) + size >
1453 lim_cur_proc(td->td_proc, RLIMIT_MEMLOCK)) {
1454 racct_set_force(td->td_proc, RACCT_VMEM,
1455 map->size);
1456 PROC_UNLOCK(td->td_proc);
1457 return (ENOMEM);
1458 }
1459 error = racct_set(td->td_proc, RACCT_MEMLOCK,
1460 ptoa(pmap_wired_count(map->pmap)) + size);
1461 if (error != 0) {
1462 racct_set_force(td->td_proc, RACCT_VMEM,
1463 map->size);
1464 PROC_UNLOCK(td->td_proc);
1465 return (error);
1466 }
1467 }
1468 PROC_UNLOCK(td->td_proc);
1469 }
1470
1471 /*
1472 * We currently can only deal with page aligned file offsets.
1473 * The mmap() system call already enforces this by subtracting
1474 * the page offset from the file offset, but checking here
1475 * catches errors in device drivers (e.g. d_mmap_single()
1476 * callbacks) and other internal mapping requests (such as in
1477 * exec).
1478 */
1479 if (foff & PAGE_MASK)
1480 return (EINVAL);
1481
1482 if ((flags & MAP_FIXED) == 0) {
1483 fitit = TRUE;
1484 *addr = round_page(*addr);
1485 } else {
1486 if (*addr != trunc_page(*addr))
1487 return (EINVAL);
1488 fitit = FALSE;
1489 }
1490
1491 if (flags & MAP_ANON) {
1492 if (object != NULL || foff != 0)
1493 return (EINVAL);
1494 docow = 0;
1495 } else if (flags & MAP_PREFAULT_READ)
1496 docow = MAP_PREFAULT;
1497 else
1498 docow = MAP_PREFAULT_PARTIAL;
1499
1500 if ((flags & (MAP_ANON|MAP_SHARED)) == 0)
1501 docow |= MAP_COPY_ON_WRITE;
1502 if (flags & MAP_NOSYNC)
1503 docow |= MAP_DISABLE_SYNCER;
1504 if (flags & MAP_NOCORE)
1505 docow |= MAP_DISABLE_COREDUMP;
1506 /* Shared memory is also shared with children. */
1507 if (flags & MAP_SHARED)
1508 docow |= MAP_INHERIT_SHARE;
1509 if (writecounted)
1510 docow |= MAP_VN_WRITECOUNT;
1511 if (flags & MAP_STACK) {
1512 if (object != NULL)
1513 return (EINVAL);
1514 docow |= MAP_STACK_GROWS_DOWN;
1515 }
1516 if ((flags & MAP_EXCL) != 0)
1517 docow |= MAP_CHECK_EXCL;
1518 if ((flags & MAP_GUARD) != 0)
1519 docow |= MAP_CREATE_GUARD;
1520
1521 if (fitit) {
1522 if ((flags & MAP_ALIGNMENT_MASK) == MAP_ALIGNED_SUPER)
1523 findspace = VMFS_SUPER_SPACE;
1524 else if ((flags & MAP_ALIGNMENT_MASK) != 0)
1525 findspace = VMFS_ALIGNED_SPACE(flags >>
1526 MAP_ALIGNMENT_SHIFT);
1527 else
1528 findspace = VMFS_OPTIMAL_SPACE;
1529 max_addr = 0;
1530 #ifdef MAP_32BIT
1531 if ((flags & MAP_32BIT) != 0)
1532 max_addr = MAP_32BIT_MAX_ADDR;
1533 #endif
1534 if (curmap) {
1535 rv = vm_map_find_min(map, object, foff, addr, size,
1536 round_page((vm_offset_t)td->td_proc->p_vmspace->
1537 vm_daddr + lim_max(td, RLIMIT_DATA)), max_addr,
1538 findspace, prot, maxprot, docow);
1539 } else {
1540 rv = vm_map_find(map, object, foff, addr, size,
1541 max_addr, findspace, prot, maxprot, docow);
1542 }
1543 } else {
1544 rv = vm_map_fixed(map, object, foff, *addr, size,
1545 prot, maxprot, docow);
1546 }
1547
1548 if (rv == KERN_SUCCESS) {
1549 /*
1550 * If the process has requested that all future mappings
1551 * be wired, then heed this.
1552 */
1553 if (map->flags & MAP_WIREFUTURE) {
1554 vm_map_wire(map, *addr, *addr + size,
1555 VM_MAP_WIRE_USER | ((flags & MAP_STACK) ?
1556 VM_MAP_WIRE_HOLESOK : VM_MAP_WIRE_NOHOLES));
1557 }
1558 }
1559 return (vm_mmap_to_errno(rv));
1560 }
1561
1562 /*
1563 * Translate a Mach VM return code to zero on success or the appropriate errno
1564 * on failure.
1565 */
1566 int
1567 vm_mmap_to_errno(int rv)
1568 {
1569
1570 switch (rv) {
1571 case KERN_SUCCESS:
1572 return (0);
1573 case KERN_INVALID_ADDRESS:
1574 case KERN_NO_SPACE:
1575 return (ENOMEM);
1576 case KERN_PROTECTION_FAILURE:
1577 return (EACCES);
1578 default:
1579 return (EINVAL);
1580 }
1581 }