FreeBSD/Linux Kernel Cross Reference
sys/vm/vm_mmap.c
/*-
 * Copyright (c) 1988 University of Utah.
 * Copyright (c) 1991, 1993
 *	The Regents of the University of California.  All rights reserved.
 *
 * This code is derived from software contributed to Berkeley by
 * the Systems Programming Group of the University of Utah Computer
 * Science Department.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 4. Neither the name of the University nor the names of its contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 * from: Utah $Hdr: vm_mmap.c 1.6 91/10/21$
 *
 *	@(#)vm_mmap.c	8.4 (Berkeley) 1/12/94
 */

/*
 * Mapped file (mmap) interface to VM
 */

#include <sys/cdefs.h>
__FBSDID("$FreeBSD: releng/8.3/sys/vm/vm_mmap.c 226763 2011-10-25 23:19:57Z alc $");

#include "opt_compat.h"
#include "opt_hwpmc_hooks.h"

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/lock.h>
#include <sys/mutex.h>
#include <sys/sysproto.h>
#include <sys/filedesc.h>
#include <sys/priv.h>
#include <sys/proc.h>
#include <sys/resource.h>
#include <sys/resourcevar.h>
#include <sys/vnode.h>
#include <sys/fcntl.h>
#include <sys/file.h>
#include <sys/mman.h>
#include <sys/mount.h>
#include <sys/conf.h>
#include <sys/stat.h>
#include <sys/sysent.h>
#include <sys/vmmeter.h>

#include <security/mac/mac_framework.h>

#include <vm/vm.h>
#include <vm/vm_param.h>
#include <vm/pmap.h>
#include <vm/vm_map.h>
#include <vm/vm_object.h>
#include <vm/vm_page.h>
#include <vm/vm_pager.h>
#include <vm/vm_pageout.h>
#include <vm/vm_extern.h>

#ifdef HWPMC_HOOKS
#include <sys/pmckern.h>
#endif

#ifndef _SYS_SYSPROTO_H_
struct sbrk_args {
	int incr;
};
#endif

static int vm_mmap_vnode(struct thread *, vm_size_t, vm_prot_t, vm_prot_t *,
    int *, struct vnode *, vm_ooffset_t *, vm_object_t *);
static int vm_mmap_cdev(struct thread *, vm_size_t, vm_prot_t, vm_prot_t *,
    int *, struct cdev *, vm_ooffset_t *, vm_object_t *);
static int vm_mmap_shm(struct thread *, vm_size_t, vm_prot_t, vm_prot_t *,
    int *, struct shmfd *, vm_ooffset_t, vm_object_t *);

/*
 * MPSAFE
 */
/* ARGSUSED */
int
sbrk(td, uap)
	struct thread *td;
	struct sbrk_args *uap;
{
	/* Not yet implemented */
	return (EOPNOTSUPP);
}

#ifndef _SYS_SYSPROTO_H_
struct sstk_args {
	int incr;
};
#endif

/*
 * MPSAFE
 */
/* ARGSUSED */
int
sstk(td, uap)
	struct thread *td;
	struct sstk_args *uap;
{
	/* Not yet implemented */
	return (EOPNOTSUPP);
}

#if defined(COMPAT_43)
#ifndef _SYS_SYSPROTO_H_
struct getpagesize_args {
	int dummy;
};
#endif

/* ARGSUSED */
int
ogetpagesize(td, uap)
	struct thread *td;
	struct getpagesize_args *uap;
{
	/* MP SAFE */
	td->td_retval[0] = PAGE_SIZE;
	return (0);
}
#endif /* COMPAT_43 */

/*
 * Memory Map (mmap) system call.  Note that the file offset
 * and address are allowed to be NOT page aligned, though if
 * the MAP_FIXED flag is set, both must have the same remainder
 * modulo the PAGE_SIZE (POSIX 1003.1b).  If the address is not
 * page-aligned, the actual mapping starts at trunc_page(addr)
 * and the return value is adjusted up by the page offset.
 *
 * Generally speaking, only character devices which are themselves
 * memory-based, such as a video framebuffer, can be mmap'd.  Otherwise
 * there would be no cache coherency between a descriptor and a VM mapping
 * both to the same character device.
 */
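/*
 * Illustrative sketch (not part of the kernel source): given the rounding
 * rules above, a userland caller may pass a file offset that is not page
 * aligned and still get a pointer to the byte it asked for:
 *
 *	int fd = open("/some/file", O_RDONLY);
 *	char *p = mmap(NULL, 4096, PROT_READ, MAP_PRIVATE, fd, 100);
 *
 * Offset 100 is not page aligned; the mapping actually starts at
 * trunc_page(100) == 0 and the returned pointer is adjusted up by the
 * page offset, so *p is byte 100 of the file.
 */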
#ifndef _SYS_SYSPROTO_H_
struct mmap_args {
	void *addr;
	size_t len;
	int prot;
	int flags;
	int fd;
	long pad;
	off_t pos;
};
#endif

/*
 * MPSAFE
 */
int
mmap(td, uap)
	struct thread *td;
	struct mmap_args *uap;
{
#ifdef HWPMC_HOOKS
	struct pmckern_map_in pkm;
#endif
	struct file *fp;
	struct vnode *vp;
	vm_offset_t addr;
	vm_size_t size, pageoff;
	vm_prot_t prot, maxprot;
	void *handle;
	objtype_t handle_type;
	int flags, error;
	off_t pos;
	struct vmspace *vms = td->td_proc->p_vmspace;

	addr = (vm_offset_t) uap->addr;
	size = uap->len;
	prot = uap->prot & VM_PROT_ALL;
	flags = uap->flags;
	pos = uap->pos;

	fp = NULL;
	/* Make sure the mapping fits into the numeric range, etc. */
	if ((uap->len == 0 && !SV_CURPROC_FLAG(SV_AOUT) &&
	     curproc->p_osrel >= P_OSREL_MAP_ANON) ||
	    ((flags & MAP_ANON) && (uap->fd != -1 || pos != 0)))
		return (EINVAL);

	if (flags & MAP_STACK) {
		if ((uap->fd != -1) ||
		    ((prot & (PROT_READ | PROT_WRITE)) != (PROT_READ | PROT_WRITE)))
			return (EINVAL);
		flags |= MAP_ANON;
		pos = 0;
	}

	/*
	 * Align the file position to a page boundary,
	 * and save its page offset component.
	 */
	pageoff = (pos & PAGE_MASK);
	pos -= pageoff;

	/* Adjust size for rounding (on both ends). */
	size += pageoff;			/* low end... */
	size = (vm_size_t) round_page(size);	/* hi end */

	/*
	 * Check for illegal addresses.  Watch out for address wrap... Note
	 * that VM_*_ADDRESS are not constants due to casts (argh).
	 */
	if (flags & MAP_FIXED) {
		/*
		 * The specified address must have the same remainder
		 * as the file offset taken modulo PAGE_SIZE, so it
		 * should be aligned after adjustment by pageoff.
		 */
		addr -= pageoff;
		if (addr & PAGE_MASK)
			return (EINVAL);
		/* Address range must be all in user VM space. */
		if (addr < vm_map_min(&vms->vm_map) ||
		    addr + size > vm_map_max(&vms->vm_map))
			return (EINVAL);
		if (addr + size < addr)
			return (EINVAL);
	} else {
		/*
		 * XXX for non-fixed mappings where no hint is provided or
		 * the hint would fall in the potential heap space,
		 * place it after the end of the largest possible heap.
		 *
		 * There should really be a pmap call to determine a reasonable
		 * location.
		 */
		PROC_LOCK(td->td_proc);
		if (addr == 0 ||
		    (addr >= round_page((vm_offset_t)vms->vm_taddr) &&
		    addr < round_page((vm_offset_t)vms->vm_daddr +
		    lim_max(td->td_proc, RLIMIT_DATA))))
			addr = round_page((vm_offset_t)vms->vm_daddr +
			    lim_max(td->td_proc, RLIMIT_DATA));
		PROC_UNLOCK(td->td_proc);
	}
	if (flags & MAP_ANON) {
		/*
		 * Mapping blank space is trivial.
		 */
		handle = NULL;
		handle_type = OBJT_DEFAULT;
		maxprot = VM_PROT_ALL;
	} else {
		/*
		 * Mapping a file: get fp for validation and don't let
		 * the descriptor disappear on us if we block.
		 */
		if ((error = fget(td, uap->fd, &fp)) != 0)
			goto done;
		if (fp->f_type == DTYPE_SHM) {
			handle = fp->f_data;
			handle_type = OBJT_SWAP;
			maxprot = VM_PROT_NONE;

			/* FREAD should always be set. */
			if (fp->f_flag & FREAD)
				maxprot |= VM_PROT_EXECUTE | VM_PROT_READ;
			if (fp->f_flag & FWRITE)
				maxprot |= VM_PROT_WRITE;
			goto map;
		}
		if (fp->f_type != DTYPE_VNODE) {
			error = ENODEV;
			goto done;
		}
#if defined(COMPAT_FREEBSD7) || defined(COMPAT_FREEBSD6) || \
    defined(COMPAT_FREEBSD5) || defined(COMPAT_FREEBSD4)
		/*
		 * POSIX shared-memory objects are defined to have
		 * kernel persistence, and are not defined to support
		 * read(2)/write(2) -- or even open(2).  Thus, we can
		 * use MAP_NOSYNC to trade on-disk coherence for speed.
		 * The shm_open(3) library routine turns on the FPOSIXSHM
		 * flag to request this behavior.
		 */
		if (fp->f_flag & FPOSIXSHM)
			flags |= MAP_NOSYNC;
#endif
		vp = fp->f_vnode;
		/*
		 * Ensure that file and memory protections are
		 * compatible.  Note that we only worry about
		 * writability if mapping is shared; in this case,
		 * current and max prot are dictated by the open file.
		 * XXX use the vnode instead?  Problem is: what
		 * credentials do we use for determination?  What if
		 * proc does a setuid?
		 */
		if (vp->v_mount != NULL && vp->v_mount->mnt_flag & MNT_NOEXEC)
			maxprot = VM_PROT_NONE;
		else
			maxprot = VM_PROT_EXECUTE;
		if (fp->f_flag & FREAD) {
			maxprot |= VM_PROT_READ;
		} else if (prot & PROT_READ) {
			error = EACCES;
			goto done;
		}
		/*
		 * If we are sharing potential changes (either via
		 * MAP_SHARED or via the implicit sharing of character
		 * device mappings), and we are trying to get write
		 * permission although we opened it without asking
		 * for it, bail out.
		 */
		if ((flags & MAP_SHARED) != 0) {
			if ((fp->f_flag & FWRITE) != 0) {
				maxprot |= VM_PROT_WRITE;
			} else if ((prot & PROT_WRITE) != 0) {
				error = EACCES;
				goto done;
			}
		} else if (vp->v_type != VCHR || (fp->f_flag & FWRITE) != 0) {
			maxprot |= VM_PROT_WRITE;
		}
		handle = (void *)vp;
		handle_type = OBJT_VNODE;
	}
map:
	td->td_fpop = fp;
	error = vm_mmap(&vms->vm_map, &addr, size, prot, maxprot,
	    flags, handle_type, handle, pos);
	td->td_fpop = NULL;
#ifdef HWPMC_HOOKS
	/* Inform hwpmc(4) if an executable is being mapped. */
	if (error == 0 && handle_type == OBJT_VNODE &&
	    (prot & PROT_EXEC)) {
		pkm.pm_file = handle;
		pkm.pm_address = (uintptr_t) addr;
		PMC_CALL_HOOK(td, PMC_FN_MMAP, (void *) &pkm);
	}
#endif
	if (error == 0)
		td->td_retval[0] = (register_t) (addr + pageoff);
done:
	if (fp)
		fdrop(fp, td);

	return (error);
}

int
freebsd6_mmap(struct thread *td, struct freebsd6_mmap_args *uap)
{
	struct mmap_args oargs;

	oargs.addr = uap->addr;
	oargs.len = uap->len;
	oargs.prot = uap->prot;
	oargs.flags = uap->flags;
	oargs.fd = uap->fd;
	oargs.pos = uap->pos;
	return (mmap(td, &oargs));
}

#ifdef COMPAT_43
#ifndef _SYS_SYSPROTO_H_
struct ommap_args {
	caddr_t addr;
	int len;
	int prot;
	int flags;
	int fd;
	long pos;
};
#endif
int
ommap(td, uap)
	struct thread *td;
	struct ommap_args *uap;
{
	struct mmap_args nargs;
	static const char cvtbsdprot[8] = {
		0,
		PROT_EXEC,
		PROT_WRITE,
		PROT_EXEC | PROT_WRITE,
		PROT_READ,
		PROT_EXEC | PROT_READ,
		PROT_WRITE | PROT_READ,
		PROT_EXEC | PROT_WRITE | PROT_READ,
	};

#define	OMAP_ANON	0x0002
#define	OMAP_COPY	0x0020
#define	OMAP_SHARED	0x0010
#define	OMAP_FIXED	0x0100

	nargs.addr = uap->addr;
	nargs.len = uap->len;
	nargs.prot = cvtbsdprot[uap->prot & 0x7];
	nargs.flags = 0;
	if (uap->flags & OMAP_ANON)
		nargs.flags |= MAP_ANON;
	if (uap->flags & OMAP_COPY)
		nargs.flags |= MAP_COPY;
	if (uap->flags & OMAP_SHARED)
		nargs.flags |= MAP_SHARED;
	else
		nargs.flags |= MAP_PRIVATE;
	if (uap->flags & OMAP_FIXED)
		nargs.flags |= MAP_FIXED;
	nargs.fd = uap->fd;
	nargs.pos = uap->pos;
	return (mmap(td, &nargs));
}
#endif /* COMPAT_43 */
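/*
 * Illustrative sketch (assumption, not from the source): cvtbsdprot[] is
 * indexed by the old 4.3BSD protection bits (exec = 1, write = 2,
 * read = 4), so an old binary's call equivalent to
 *
 *	mmap(addr, len, 4 | 2, OMAP_SHARED, fd, 0);
 *
 * is translated above into the modern
 *
 *	mmap(addr, len, PROT_READ | PROT_WRITE, MAP_SHARED, fd, 0);
 */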

#ifndef _SYS_SYSPROTO_H_
struct msync_args {
	void *addr;
	size_t len;
	int flags;
};
#endif
/*
 * MPSAFE
 */
int
msync(td, uap)
	struct thread *td;
	struct msync_args *uap;
{
	vm_offset_t addr;
	vm_size_t size, pageoff;
	int flags;
	vm_map_t map;
	int rv;

	addr = (vm_offset_t) uap->addr;
	size = uap->len;
	flags = uap->flags;

	pageoff = (addr & PAGE_MASK);
	addr -= pageoff;
	size += pageoff;
	size = (vm_size_t) round_page(size);
	if (addr + size < addr)
		return (EINVAL);

	if ((flags & (MS_ASYNC|MS_INVALIDATE)) == (MS_ASYNC|MS_INVALIDATE))
		return (EINVAL);

	map = &td->td_proc->p_vmspace->vm_map;

	/*
	 * Clean the pages and interpret the return value.
	 */
	rv = vm_map_sync(map, addr, addr + size, (flags & MS_ASYNC) == 0,
	    (flags & MS_INVALIDATE) != 0);
	switch (rv) {
	case KERN_SUCCESS:
		return (0);
	case KERN_INVALID_ADDRESS:
		return (EINVAL);	/* Sun returns ENOMEM? */
	case KERN_INVALID_ARGUMENT:
		return (EBUSY);
	default:
		return (EINVAL);
	}
}
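/*
 * Illustrative sketch (not part of the source): a userland caller flushing
 * a shared file mapping might do
 *
 *	if (msync(p, len, MS_SYNC) == -1)
 *		err(1, "msync");
 *
 * MS_ASYNC queues the writes instead of waiting for them, and
 * MS_INVALIDATE also discards cached pages; as checked above, combining
 * MS_ASYNC with MS_INVALIDATE is rejected with EINVAL.
 */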

#ifndef _SYS_SYSPROTO_H_
struct munmap_args {
	void *addr;
	size_t len;
};
#endif
/*
 * MPSAFE
 */
int
munmap(td, uap)
	struct thread *td;
	struct munmap_args *uap;
{
#ifdef HWPMC_HOOKS
	struct pmckern_map_out pkm;
	vm_map_entry_t entry;
#endif
	vm_offset_t addr;
	vm_size_t size, pageoff;
	vm_map_t map;

	addr = (vm_offset_t) uap->addr;
	size = uap->len;
	if (size == 0)
		return (EINVAL);

	pageoff = (addr & PAGE_MASK);
	addr -= pageoff;
	size += pageoff;
	size = (vm_size_t) round_page(size);
	if (addr + size < addr)
		return (EINVAL);

	/*
	 * Check for illegal addresses.  Watch out for address wrap...
	 */
	map = &td->td_proc->p_vmspace->vm_map;
	if (addr < vm_map_min(map) || addr + size > vm_map_max(map))
		return (EINVAL);
	vm_map_lock(map);
#ifdef HWPMC_HOOKS
	/*
	 * Inform hwpmc if the address range being unmapped contains
	 * an executable region.
	 */
	pkm.pm_address = (uintptr_t) NULL;
	if (vm_map_lookup_entry(map, addr, &entry)) {
		for (;
		    entry != &map->header && entry->start < addr + size;
		    entry = entry->next) {
			if (vm_map_check_protection(map, entry->start,
			    entry->end, VM_PROT_EXECUTE) == TRUE) {
				pkm.pm_address = (uintptr_t) addr;
				pkm.pm_size = (size_t) size;
				break;
			}
		}
	}
#endif
	vm_map_delete(map, addr, addr + size);

#ifdef HWPMC_HOOKS
	/* Downgrade the lock to prevent a LOR with the pmc-sx lock. */
	vm_map_lock_downgrade(map);
	if (pkm.pm_address != (uintptr_t) NULL)
		PMC_CALL_HOOK(td, PMC_FN_MUNMAP, (void *) &pkm);
	vm_map_unlock_read(map);
#else
	vm_map_unlock(map);
#endif
	/* vm_map_delete returns nothing but KERN_SUCCESS anyway. */
	return (0);
}
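/*
 * Illustrative sketch (not part of the source): since addr and len are
 * rounded outward to page boundaries above, unmapping even a single byte
 * removes every page it touches:
 *
 *	munmap(p + 10, 1);
 *
 * removes the whole range [trunc_page(p + 10), round_page(p + 11)).
 */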

#ifndef _SYS_SYSPROTO_H_
struct mprotect_args {
	const void *addr;
	size_t len;
	int prot;
};
#endif
/*
 * MPSAFE
 */
int
mprotect(td, uap)
	struct thread *td;
	struct mprotect_args *uap;
{
	vm_offset_t addr;
	vm_size_t size, pageoff;
	vm_prot_t prot;

	addr = (vm_offset_t) uap->addr;
	size = uap->len;
	prot = uap->prot & VM_PROT_ALL;

	pageoff = (addr & PAGE_MASK);
	addr -= pageoff;
	size += pageoff;
	size = (vm_size_t) round_page(size);
	if (addr + size < addr)
		return (EINVAL);

	switch (vm_map_protect(&td->td_proc->p_vmspace->vm_map, addr,
	    addr + size, prot, FALSE)) {
	case KERN_SUCCESS:
		return (0);
	case KERN_PROTECTION_FAILURE:
		return (EACCES);
	case KERN_RESOURCE_SHORTAGE:
		return (ENOMEM);
	}
	return (EINVAL);
}
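/*
 * Illustrative sketch (not part of the source): a typical userland use is
 * write-protecting a region after initializing it:
 *
 *	if (mprotect(p, len, PROT_READ) == -1)
 *		err(1, "mprotect");
 *
 * An EACCES return corresponds to KERN_PROTECTION_FAILURE above, i.e.
 * asking for more than the mapping's maximum protection allows.
 */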

#ifndef _SYS_SYSPROTO_H_
struct minherit_args {
	void *addr;
	size_t len;
	int inherit;
};
#endif
/*
 * MPSAFE
 */
int
minherit(td, uap)
	struct thread *td;
	struct minherit_args *uap;
{
	vm_offset_t addr;
	vm_size_t size, pageoff;
	vm_inherit_t inherit;

	addr = (vm_offset_t)uap->addr;
	size = uap->len;
	inherit = uap->inherit;

	pageoff = (addr & PAGE_MASK);
	addr -= pageoff;
	size += pageoff;
	size = (vm_size_t) round_page(size);
	if (addr + size < addr)
		return (EINVAL);

	switch (vm_map_inherit(&td->td_proc->p_vmspace->vm_map, addr,
	    addr + size, inherit)) {
	case KERN_SUCCESS:
		return (0);
	case KERN_PROTECTION_FAILURE:
		return (EACCES);
	}
	return (EINVAL);
}
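/*
 * Illustrative sketch (not part of the source): minherit() controls what a
 * child sees after fork(2); for example, a MAP_PRIVATE region can still be
 * shared with children:
 *
 *	minherit(p, len, INHERIT_SHARE);
 *	if (fork() == 0)
 *		p[0] = 1;
 *
 * With INHERIT_SHARE the store above is visible to the parent.
 */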

#ifndef _SYS_SYSPROTO_H_
struct madvise_args {
	void *addr;
	size_t len;
	int behav;
};
#endif

/*
 * MPSAFE
 */
/* ARGSUSED */
int
madvise(td, uap)
	struct thread *td;
	struct madvise_args *uap;
{
	vm_offset_t start, end;
	vm_map_t map;
	struct proc *p;
	int error;

	/*
	 * Check for our special case, advising the swap pager we are
	 * "immortal."
	 */
	if (uap->behav == MADV_PROTECT) {
		error = priv_check(td, PRIV_VM_MADV_PROTECT);
		if (error == 0) {
			p = td->td_proc;
			PROC_LOCK(p);
			p->p_flag |= P_PROTECTED;
			PROC_UNLOCK(p);
		}
		return (error);
	}
	/*
	 * Check for illegal behavior.
	 */
	if (uap->behav < 0 || uap->behav > MADV_CORE)
		return (EINVAL);
	/*
	 * Check for illegal addresses.  Watch out for address wrap... Note
	 * that VM_*_ADDRESS are not constants due to casts (argh).
	 */
	map = &td->td_proc->p_vmspace->vm_map;
	if ((vm_offset_t)uap->addr < vm_map_min(map) ||
	    (vm_offset_t)uap->addr + uap->len > vm_map_max(map))
		return (EINVAL);
	if (((vm_offset_t) uap->addr + uap->len) < (vm_offset_t) uap->addr)
		return (EINVAL);

	/*
	 * Since this routine is only advisory, we default to conservative
	 * behavior.
	 */
	start = trunc_page((vm_offset_t) uap->addr);
	end = round_page((vm_offset_t) uap->addr + uap->len);

	if (vm_map_madvise(map, start, end, uap->behav))
		return (EINVAL);
	return (0);
}
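/*
 * Illustrative sketch (not part of the source): madvise() is purely a
 * hint, e.g.
 *
 *	madvise(p, len, MADV_WILLNEED);
 *	madvise(p, len, MADV_DONTNEED);
 *
 * A return of 0 promises nothing about what the pager actually did with
 * the advice, which is why the code above defaults to conservative
 * behavior.
 */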

#ifndef _SYS_SYSPROTO_H_
struct mincore_args {
	const void *addr;
	size_t len;
	char *vec;
};
#endif

/*
 * MPSAFE
 */
/* ARGSUSED */
int
mincore(td, uap)
	struct thread *td;
	struct mincore_args *uap;
{
	vm_offset_t addr, first_addr;
	vm_offset_t end, cend;
	pmap_t pmap;
	vm_map_t map;
	char *vec;
	int error = 0;
	int vecindex, lastvecindex;
	vm_map_entry_t current;
	vm_map_entry_t entry;
	int mincoreinfo;
	unsigned int timestamp;

	/*
	 * Make sure that the addresses presented are valid for user
	 * mode.
	 */
	first_addr = addr = trunc_page((vm_offset_t) uap->addr);
	end = addr + (vm_size_t)round_page(uap->len);
	map = &td->td_proc->p_vmspace->vm_map;
	if (end > vm_map_max(map) || end < addr)
		return (ENOMEM);

	/*
	 * Address of byte vector
	 */
	vec = uap->vec;

	pmap = vmspace_pmap(td->td_proc->p_vmspace);

	vm_map_lock_read(map);
RestartScan:
	timestamp = map->timestamp;

	if (!vm_map_lookup_entry(map, addr, &entry)) {
		vm_map_unlock_read(map);
		return (ENOMEM);
	}

	/*
	 * Do this on a map entry basis so that if the pages are not
	 * in the current process's address space, we can easily look
	 * up the pages elsewhere.
	 */
	lastvecindex = -1;
	for (current = entry;
	    (current != &map->header) && (current->start < end);
	    current = current->next) {

		/*
		 * Check for contiguity.
		 */
		if (current->end < end &&
		    (current->next == &map->header ||
		    current->next->start > current->end)) {
			vm_map_unlock_read(map);
			return (ENOMEM);
		}

		/*
		 * Ignore submaps (for now) or null objects.
		 */
		if ((current->eflags & MAP_ENTRY_IS_SUB_MAP) ||
		    current->object.vm_object == NULL)
			continue;

		/*
		 * Limit this scan to the current map entry and the
		 * limits for the mincore call.
		 */
		if (addr < current->start)
			addr = current->start;
		cend = current->end;
		if (cend > end)
			cend = end;

		/*
		 * Scan this entry one page at a time.
		 */
		while (addr < cend) {
			/*
			 * Check pmap first, it is likely faster, also
			 * it can provide info as to whether we are the
			 * one referencing or modifying the page.
			 */
			mincoreinfo = pmap_mincore(pmap, addr);
			if (!mincoreinfo) {
				vm_pindex_t pindex;
				vm_ooffset_t offset;
				vm_page_t m;
				/*
				 * Calculate the page index into the object.
				 */
				offset = current->offset + (addr - current->start);
				pindex = OFF_TO_IDX(offset);
				VM_OBJECT_LOCK(current->object.vm_object);
				m = vm_page_lookup(current->object.vm_object,
				    pindex);
				/*
				 * If the page is resident, then gather
				 * information about it.
				 */
				if (m != NULL && m->valid != 0) {
					mincoreinfo = MINCORE_INCORE;
					vm_page_lock_queues();
					if (m->dirty ||
					    pmap_is_modified(m))
						mincoreinfo |= MINCORE_MODIFIED_OTHER;
					if ((m->flags & PG_REFERENCED) ||
					    pmap_ts_referenced(m)) {
						vm_page_flag_set(m, PG_REFERENCED);
						mincoreinfo |= MINCORE_REFERENCED_OTHER;
					}
					vm_page_unlock_queues();
				}
				VM_OBJECT_UNLOCK(current->object.vm_object);
			}

			/*
			 * subyte may page fault.  In case it needs to modify
			 * the map, we release the lock.
			 */
			vm_map_unlock_read(map);

			/*
			 * Calculate index into user supplied byte vector.
			 */
			vecindex = OFF_TO_IDX(addr - first_addr);

			/*
			 * If we have skipped map entries, we need to make sure
			 * that the byte vector is zeroed for those skipped
			 * entries.
			 */
			while ((lastvecindex + 1) < vecindex) {
				++lastvecindex;
				error = subyte(vec + lastvecindex, 0);
				if (error) {
					error = EFAULT;
					goto done2;
				}
			}

			/*
			 * Pass the page information to the user.
			 */
			error = subyte(vec + vecindex, mincoreinfo);
			if (error) {
				error = EFAULT;
				goto done2;
			}

			/*
			 * If the map has changed, due to the subyte, the previous
			 * output may be invalid.
			 */
			vm_map_lock_read(map);
			if (timestamp != map->timestamp)
				goto RestartScan;

			lastvecindex = vecindex;
			addr += PAGE_SIZE;
		}
	}

	/*
	 * subyte may page fault.  In case it needs to modify
	 * the map, we release the lock.
	 */
	vm_map_unlock_read(map);

	/*
	 * Zero the last entries in the byte vector.
	 */
	vecindex = OFF_TO_IDX(end - first_addr);
	while ((lastvecindex + 1) < vecindex) {
		++lastvecindex;
		error = subyte(vec + lastvecindex, 0);
		if (error) {
			error = EFAULT;
			goto done2;
		}
	}

	/*
	 * If the map has changed, due to the subyte, the previous
	 * output may be invalid.
	 */
	vm_map_lock_read(map);
	if (timestamp != map->timestamp)
		goto RestartScan;
	vm_map_unlock_read(map);
done2:
	return (error);
}
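/*
 * Illustrative sketch (not part of the source): userland receives one
 * status byte per page of the queried range:
 *
 *	char vec[npages];
 *
 *	if (mincore(p, npages * getpagesize(), vec) == 0 &&
 *	    (vec[0] & MINCORE_INCORE))
 *		printf("first page is resident\n");
 */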

#ifndef _SYS_SYSPROTO_H_
struct mlock_args {
	const void *addr;
	size_t len;
};
#endif
/*
 * MPSAFE
 */
int
mlock(td, uap)
	struct thread *td;
	struct mlock_args *uap;
{
	struct proc *proc;
	vm_offset_t addr, end, last, start;
	vm_size_t npages, size;
	int error;

	error = priv_check(td, PRIV_VM_MLOCK);
	if (error)
		return (error);
	addr = (vm_offset_t)uap->addr;
	size = uap->len;
	last = addr + size;
	start = trunc_page(addr);
	end = round_page(last);
	if (last < addr || end < addr)
		return (EINVAL);
	npages = atop(end - start);
	if (npages > vm_page_max_wired)
		return (ENOMEM);
	proc = td->td_proc;
	PROC_LOCK(proc);
	if (ptoa(npages +
	    pmap_wired_count(vm_map_pmap(&proc->p_vmspace->vm_map))) >
	    lim_cur(proc, RLIMIT_MEMLOCK)) {
		PROC_UNLOCK(proc);
		return (ENOMEM);
	}
	PROC_UNLOCK(proc);
	if (npages + cnt.v_wire_count > vm_page_max_wired)
		return (EAGAIN);
	error = vm_map_wire(&proc->p_vmspace->vm_map, start, end,
	    VM_MAP_WIRE_USER | VM_MAP_WIRE_NOHOLES);
	return (error == KERN_SUCCESS ? 0 : ENOMEM);
}
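/*
 * Illustrative sketch (not part of the source): a typical use is keeping
 * sensitive data off swap; callers should expect ENOMEM or EAGAIN, since
 * wiring is bounded by both RLIMIT_MEMLOCK and vm_page_max_wired above:
 *
 *	if (mlock(key, keylen) == -1)
 *		warn("mlock");
 */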

#ifndef _SYS_SYSPROTO_H_
struct mlockall_args {
	int how;
};
#endif

/*
 * MPSAFE
 */
int
mlockall(td, uap)
	struct thread *td;
	struct mlockall_args *uap;
{
	vm_map_t map;
	int error;

	map = &td->td_proc->p_vmspace->vm_map;
	error = 0;

	if ((uap->how == 0) || ((uap->how & ~(MCL_CURRENT|MCL_FUTURE)) != 0))
		return (EINVAL);

#if 0
	/*
	 * If wiring all pages in the process would cause it to exceed
	 * a hard resource limit, return ENOMEM.
	 */
	PROC_LOCK(td->td_proc);
	if (map->size - ptoa(pmap_wired_count(vm_map_pmap(map))) >
	    lim_cur(td->td_proc, RLIMIT_MEMLOCK)) {
		PROC_UNLOCK(td->td_proc);
		return (ENOMEM);
	}
	PROC_UNLOCK(td->td_proc);
#else
	error = priv_check(td, PRIV_VM_MLOCK);
	if (error)
		return (error);
#endif

	if (uap->how & MCL_FUTURE) {
		vm_map_lock(map);
		vm_map_modflags(map, MAP_WIREFUTURE, 0);
		vm_map_unlock(map);
		error = 0;
	}

	if (uap->how & MCL_CURRENT) {
		/*
		 * P1003.1-2001 mandates that all currently mapped pages
		 * will be memory resident and locked (wired) upon return
		 * from mlockall().  vm_map_wire() will wire pages, by
		 * calling vm_fault_wire() for each page in the region.
		 */
		error = vm_map_wire(map, vm_map_min(map), vm_map_max(map),
		    VM_MAP_WIRE_USER|VM_MAP_WIRE_HOLESOK);
		error = (error == KERN_SUCCESS ? 0 : EAGAIN);
	}

	return (error);
}
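/*
 * Illustrative sketch (not part of the source): a real-time process that
 * wants its whole image resident, now and in the future:
 *
 *	if (mlockall(MCL_CURRENT | MCL_FUTURE) == -1)
 *		err(1, "mlockall");
 *
 * MCL_FUTURE sets MAP_WIREFUTURE on the vm_map, which vm_mmap() honors
 * further below by wiring each new mapping as it is created.
 */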

#ifndef _SYS_SYSPROTO_H_
struct munlockall_args {
	register_t dummy;
};
#endif

/*
 * MPSAFE
 */
int
munlockall(td, uap)
	struct thread *td;
	struct munlockall_args *uap;
{
	vm_map_t map;
	int error;

	map = &td->td_proc->p_vmspace->vm_map;
	error = priv_check(td, PRIV_VM_MUNLOCK);
	if (error)
		return (error);

	/* Clear the MAP_WIREFUTURE flag from this vm_map. */
	vm_map_lock(map);
	vm_map_modflags(map, 0, MAP_WIREFUTURE);
	vm_map_unlock(map);

	/* Forcibly unwire all pages. */
	error = vm_map_unwire(map, vm_map_min(map), vm_map_max(map),
	    VM_MAP_WIRE_USER|VM_MAP_WIRE_HOLESOK);

	return (error);
}

#ifndef _SYS_SYSPROTO_H_
struct munlock_args {
	const void *addr;
	size_t len;
};
#endif
/*
 * MPSAFE
 */
int
munlock(td, uap)
	struct thread *td;
	struct munlock_args *uap;
{
	vm_offset_t addr, end, last, start;
	vm_size_t size;
	int error;

	error = priv_check(td, PRIV_VM_MUNLOCK);
	if (error)
		return (error);
	addr = (vm_offset_t)uap->addr;
	size = uap->len;
	last = addr + size;
	start = trunc_page(addr);
	end = round_page(last);
	if (last < addr || end < addr)
		return (EINVAL);
	error = vm_map_unwire(&td->td_proc->p_vmspace->vm_map, start, end,
	    VM_MAP_WIRE_USER | VM_MAP_WIRE_NOHOLES);
	return (error == KERN_SUCCESS ? 0 : ENOMEM);
}

/*
 * vm_mmap_vnode()
 *
 * MPSAFE
 *
 * Helper function for vm_mmap.  Performs the sanity checks specific to
 * mmap operations on vnodes.
 */
int
vm_mmap_vnode(struct thread *td, vm_size_t objsize,
    vm_prot_t prot, vm_prot_t *maxprotp, int *flagsp,
    struct vnode *vp, vm_ooffset_t *foffp, vm_object_t *objp)
{
	struct vattr va;
	vm_object_t obj;
	vm_offset_t foff;
	struct mount *mp;
	struct ucred *cred;
	int error, flags;
	int vfslocked;

	mp = vp->v_mount;
	cred = td->td_ucred;
	vfslocked = VFS_LOCK_GIANT(mp);
	if ((error = vget(vp, LK_SHARED, td)) != 0) {
		VFS_UNLOCK_GIANT(vfslocked);
		return (error);
	}
	foff = *foffp;
	flags = *flagsp;
	obj = vp->v_object;
	if (vp->v_type == VREG) {
		/*
		 * Get the proper underlying object.
		 */
		if (obj == NULL) {
			error = EINVAL;
			goto done;
		}
		if (obj->handle != vp) {
			vput(vp);
			vp = (struct vnode *)obj->handle;
			vget(vp, LK_SHARED, td);
		}
	} else if (vp->v_type == VCHR) {
		error = vm_mmap_cdev(td, objsize, prot, maxprotp, flagsp,
		    vp->v_rdev, foffp, objp);
		if (error == 0)
			goto mark_atime;
		goto done;
	} else {
		error = EINVAL;
		goto done;
	}
	if ((error = VOP_GETATTR(vp, &va, cred)))
		goto done;
#ifdef MAC
	error = mac_vnode_check_mmap(cred, vp, prot, flags);
	if (error != 0)
		goto done;
#endif
	if ((flags & MAP_SHARED) != 0) {
		if ((va.va_flags & (SF_SNAPSHOT|IMMUTABLE|APPEND)) != 0) {
			if (prot & PROT_WRITE) {
				error = EPERM;
				goto done;
			}
			*maxprotp &= ~VM_PROT_WRITE;
		}
	}
	/*
	 * If it is a regular file without any references
	 * we do not need to sync it.
	 * Adjust object size to be the size of the actual file.
	 */
	objsize = round_page(va.va_size);
	if (va.va_nlink == 0)
		flags |= MAP_NOSYNC;
	obj = vm_pager_allocate(OBJT_VNODE, vp, objsize, prot, foff,
	    td->td_ucred);
	if (obj == NULL) {
		error = ENOMEM;
		goto done;
	}
	*objp = obj;
	*flagsp = flags;

mark_atime:
	vfs_mark_atime(vp, cred);

done:
	vput(vp);
	VFS_UNLOCK_GIANT(vfslocked);
	return (error);
}

/*
 * vm_mmap_cdev()
 *
 * MPSAFE
 *
 * Helper function for vm_mmap.  Performs the sanity checks specific to
 * mmap operations on cdevs.
 */
int
vm_mmap_cdev(struct thread *td, vm_size_t objsize,
    vm_prot_t prot, vm_prot_t *maxprotp, int *flagsp,
    struct cdev *cdev, vm_ooffset_t *foff, vm_object_t *objp)
{
	vm_object_t obj;
	struct cdevsw *dsw;
	int error, flags, ref;

	flags = *flagsp;

	dsw = dev_refthread(cdev, &ref);
	if (dsw == NULL)
		return (ENXIO);
	if (dsw->d_flags & D_MMAP_ANON) {
		dev_relthread(cdev, ref);
		*maxprotp = VM_PROT_ALL;
		*flagsp |= MAP_ANON;
		return (0);
	}
	/*
	 * cdevs do not provide private mappings of any kind.
	 */
	if ((*maxprotp & VM_PROT_WRITE) == 0 &&
	    (prot & PROT_WRITE) != 0) {
		dev_relthread(cdev, ref);
		return (EACCES);
	}
	if (flags & (MAP_PRIVATE|MAP_COPY)) {
		dev_relthread(cdev, ref);
		return (EINVAL);
	}
	/*
	 * Force device mappings to be shared.
	 */
	flags |= MAP_SHARED;
#ifdef MAC_XXX
	error = mac_cdev_check_mmap(td->td_ucred, cdev, prot);
	if (error != 0) {
		dev_relthread(cdev, ref);
		return (error);
	}
#endif
	/*
	 * First, try d_mmap_single().  If that is not implemented
	 * (returns ENODEV), fall back to using the device pager.
	 * Note that d_mmap_single() must return a reference to the
	 * object (it needs to bump the reference count of the object
	 * it returns somehow).
	 *
	 * XXX assumes VM_PROT_* == PROT_*
	 */
	error = dsw->d_mmap_single(cdev, foff, objsize, objp, (int)prot);
	dev_relthread(cdev, ref);
	if (error != ENODEV)
		return (error);
	obj = vm_pager_allocate(OBJT_DEVICE, cdev, objsize, prot, *foff,
	    td->td_ucred);
	if (obj == NULL)
		return (EINVAL);
	*objp = obj;
	*flagsp = flags;
	return (0);
}
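/*
 * Illustrative sketch (assumption, names are hypothetical): a driver that
 * wants to hand out its own VM object would implement d_mmap_single()
 * roughly as follows; returning ENODEV instead makes vm_mmap_cdev() above
 * fall back to the device pager.
 *
 *	static int
 *	mydev_mmap_single(struct cdev *cdev, vm_ooffset_t *offset,
 *	    vm_size_t size, vm_object_t *object, int nprot)
 *	{
 *		struct mydev_softc *sc = cdev->si_drv1;
 *
 *		if (*offset + size > sc->mem_size)
 *			return (EINVAL);
 *		vm_object_reference(sc->mem_obj);
 *		*object = sc->mem_obj;
 *		return (0);
 *	}
 */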

/*
 * vm_mmap_shm()
 *
 * MPSAFE
 *
 * Helper function for vm_mmap.  Performs the sanity checks specific to
 * mmap operations on shm file descriptors.
 */
int
vm_mmap_shm(struct thread *td, vm_size_t objsize,
    vm_prot_t prot, vm_prot_t *maxprotp, int *flagsp,
    struct shmfd *shmfd, vm_ooffset_t foff, vm_object_t *objp)
{
	int error;

	if ((*flagsp & MAP_SHARED) != 0 &&
	    (*maxprotp & VM_PROT_WRITE) == 0 &&
	    (prot & PROT_WRITE) != 0)
		return (EACCES);
#ifdef MAC
	error = mac_posixshm_check_mmap(td->td_ucred, shmfd, prot, *flagsp);
	if (error != 0)
		return (error);
#endif
	error = shm_mmap(shmfd, objsize, foff, objp);
	if (error)
		return (error);
	return (0);
}
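/*
 * Illustrative sketch (not part of the source): this path is reached when
 * userland maps a POSIX shared memory descriptor:
 *
 *	int fd = shm_open(SHM_ANON, O_RDWR, 0600);
 *
 *	ftruncate(fd, len);
 *	p = mmap(NULL, len, PROT_READ | PROT_WRITE, MAP_SHARED, fd, 0);
 *
 * The descriptor shows up in mmap() above as DTYPE_SHM and is passed to
 * vm_mmap() with handle_type OBJT_SWAP.
 */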

/*
 * vm_mmap()
 *
 * MPSAFE
 *
 * Internal version of mmap.  Currently used by mmap, exec, and System V
 * shared memory.  Handle is either a vnode pointer or NULL for MAP_ANON.
 */
int
vm_mmap(vm_map_t map, vm_offset_t *addr, vm_size_t size, vm_prot_t prot,
	vm_prot_t maxprot, int flags,
	objtype_t handle_type, void *handle,
	vm_ooffset_t foff)
{
	boolean_t fitit;
	vm_object_t object = NULL;
	int rv = KERN_SUCCESS;
	int docow, error;
	struct thread *td = curthread;

	if (size == 0)
		return (0);

	size = round_page(size);

	PROC_LOCK(td->td_proc);
	if (td->td_proc->p_vmspace->vm_map.size + size >
	    lim_cur(td->td_proc, RLIMIT_VMEM)) {
		PROC_UNLOCK(td->td_proc);
		return (ENOMEM);
	}
	PROC_UNLOCK(td->td_proc);

	/*
	 * We currently can only deal with page aligned file offsets.
	 * The check is here rather than in the syscall because the
	 * kernel calls this function internally for other mmap
	 * operations (such as in exec) and non-aligned offsets will
	 * cause pmap inconsistencies...so we want to be sure to
	 * disallow this in all cases.
	 */
	if (foff & PAGE_MASK)
		return (EINVAL);

	if ((flags & MAP_FIXED) == 0) {
		fitit = TRUE;
		*addr = round_page(*addr);
	} else {
		if (*addr != trunc_page(*addr))
			return (EINVAL);
		fitit = FALSE;
	}
	/*
	 * Lookup/allocate object.
	 */
	switch (handle_type) {
	case OBJT_DEVICE:
		error = vm_mmap_cdev(td, size, prot, &maxprot, &flags,
		    handle, &foff, &object);
		break;
	case OBJT_VNODE:
		error = vm_mmap_vnode(td, size, prot, &maxprot, &flags,
		    handle, &foff, &object);
		break;
	case OBJT_SWAP:
		error = vm_mmap_shm(td, size, prot, &maxprot, &flags,
		    handle, foff, &object);
		break;
	case OBJT_DEFAULT:
		if (handle == NULL) {
			error = 0;
			break;
		}
		/* FALLTHROUGH */
	default:
		error = EINVAL;
		break;
	}
	if (error)
		return (error);
	if (flags & MAP_ANON) {
		object = NULL;
		docow = 0;
		/*
		 * Unnamed anonymous regions always start at 0.
		 */
		if (handle == 0)
			foff = 0;
	} else if (flags & MAP_PREFAULT_READ)
		docow = MAP_PREFAULT;
	else
		docow = MAP_PREFAULT_PARTIAL;

	if ((flags & (MAP_ANON|MAP_SHARED)) == 0)
		docow |= MAP_COPY_ON_WRITE;
	if (flags & MAP_NOSYNC)
		docow |= MAP_DISABLE_SYNCER;
	if (flags & MAP_NOCORE)
		docow |= MAP_DISABLE_COREDUMP;

	if (flags & MAP_STACK)
		rv = vm_map_stack(map, *addr, size, prot, maxprot,
		    docow | MAP_STACK_GROWS_DOWN);
	else if (fitit)
		rv = vm_map_find(map, object, foff, addr, size,
		    object != NULL && object->type == OBJT_DEVICE ?
		    VMFS_ALIGNED_SPACE : VMFS_ANY_SPACE, prot, maxprot, docow);
	else
		rv = vm_map_fixed(map, object, foff, *addr, size,
		    prot, maxprot, docow);

	if (rv != KERN_SUCCESS) {
		/*
		 * Lose the object reference.  Will destroy the
		 * object if it's an unnamed anonymous mapping
		 * or named anonymous without other references.
		 */
		vm_object_deallocate(object);
	} else if (flags & MAP_SHARED) {
		/*
		 * Shared memory is also shared with children.
		 */
		rv = vm_map_inherit(map, *addr, *addr + size, VM_INHERIT_SHARE);
		if (rv != KERN_SUCCESS)
			(void) vm_map_remove(map, *addr, *addr + size);
	}

	/*
	 * If the process has requested that all future mappings
	 * be wired, then heed this.
	 */
	if ((rv == KERN_SUCCESS) && (map->flags & MAP_WIREFUTURE))
		vm_map_wire(map, *addr, *addr + size,
		    VM_MAP_WIRE_USER|VM_MAP_WIRE_NOHOLES);

	return (vm_mmap_to_errno(rv));
}

int
vm_mmap_to_errno(int rv)
{

	switch (rv) {
	case KERN_SUCCESS:
		return (0);
	case KERN_INVALID_ADDRESS:
	case KERN_NO_SPACE:
		return (ENOMEM);
	case KERN_PROTECTION_FAILURE:
		return (EACCES);
	default:
		return (EINVAL);
	}
}