FreeBSD/Linux Kernel Cross Reference
sys/vm/vm_mmap.c


    1 /*-
    2  * Copyright (c) 1988 University of Utah.
    3  * Copyright (c) 1991, 1993
    4  *      The Regents of the University of California.  All rights reserved.
    5  *
    6  * This code is derived from software contributed to Berkeley by
    7  * the Systems Programming Group of the University of Utah Computer
    8  * Science Department.
    9  *
   10  * Redistribution and use in source and binary forms, with or without
   11  * modification, are permitted provided that the following conditions
   12  * are met:
   13  * 1. Redistributions of source code must retain the above copyright
   14  *    notice, this list of conditions and the following disclaimer.
   15  * 2. Redistributions in binary form must reproduce the above copyright
   16  *    notice, this list of conditions and the following disclaimer in the
   17  *    documentation and/or other materials provided with the distribution.
   18  * 4. Neither the name of the University nor the names of its contributors
   19  *    may be used to endorse or promote products derived from this software
   20  *    without specific prior written permission.
   21  *
   22  * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
   23  * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
   24  * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
   25  * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
   26  * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
   27  * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
   28  * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
   29  * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
   30  * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
   31  * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
   32  * SUCH DAMAGE.
   33  *
   34  * from: Utah $Hdr: vm_mmap.c 1.6 91/10/21$
   35  *
   36  *      @(#)vm_mmap.c   8.4 (Berkeley) 1/12/94
   37  */
   38 
   39 /*
   40  * Mapped file (mmap) interface to VM
   41  */
   42 
   43 #include <sys/cdefs.h>
   44 __FBSDID("$FreeBSD: releng/6.4/sys/vm/vm_mmap.c 183304 2008-09-23 16:04:17Z jhb $");
   45 
   46 #include "opt_compat.h"
   47 #include "opt_mac.h"
   48 
   49 #include <sys/param.h>
   50 #include <sys/systm.h>
   51 #include <sys/kernel.h>
   52 #include <sys/lock.h>
   53 #include <sys/mutex.h>
   54 #include <sys/sysproto.h>
   55 #include <sys/filedesc.h>
   56 #include <sys/proc.h>
   57 #include <sys/resource.h>
   58 #include <sys/resourcevar.h>
   59 #include <sys/vnode.h>
   60 #include <sys/fcntl.h>
   61 #include <sys/file.h>
   62 #include <sys/mac.h>
   63 #include <sys/mman.h>
   64 #include <sys/mount.h>
   65 #include <sys/conf.h>
   66 #include <sys/stat.h>
   67 #include <sys/vmmeter.h>
   68 #include <sys/sysctl.h>
   69 
   70 #include <vm/vm.h>
   71 #include <vm/vm_param.h>
   72 #include <vm/pmap.h>
   73 #include <vm/vm_map.h>
   74 #include <vm/vm_object.h>
   75 #include <vm/vm_page.h>
   76 #include <vm/vm_pager.h>
   77 #include <vm/vm_pageout.h>
   78 #include <vm/vm_extern.h>
   79 #include <vm/vm_page.h>
   80 #include <vm/vm_kern.h>
   81 
   82 #ifndef _SYS_SYSPROTO_H_
   83 struct sbrk_args {
   84         int incr;
   85 };
   86 #endif
   87 
   88 static int max_proc_mmap;
    89 SYSCTL_INT(_vm, OID_AUTO, max_proc_mmap, CTLFLAG_RW, &max_proc_mmap, 0, "Maximum number of memory-mapped files per process");
   90 
   91 /*
   92  * Set the maximum number of vm_map_entry structures per process.  Roughly
   93  * speaking vm_map_entry structures are tiny, so allowing them to eat 1/100
   94  * of our KVM malloc space still results in generous limits.  We want a
    95  * default that is good enough to prevent the kernel from running out of
    96  * resources if attacked from a compromised user account, but generous
    97  * enough that multi-threaded processes are not unduly inconvenienced.
   98  */
   99 static void vmmapentry_rsrc_init(void *);
  100 SYSINIT(vmmersrc, SI_SUB_KVM_RSRC, SI_ORDER_FIRST, vmmapentry_rsrc_init, NULL)
  101 
  102 static void
  103 vmmapentry_rsrc_init(dummy)
  104         void *dummy;
  105 {
   106         max_proc_mmap = vm_kmem_size / sizeof(struct vm_map_entry);
   107         max_proc_mmap /= 100;
  108 }
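
/*
 * Editor's note -- illustrative arithmetic, not part of the original file:
 * with a vm_kmem_size of, say, 320 MB and a vm_map_entry of roughly 100
 * bytes (both figures vary by platform and tuning), the default above works
 * out to 320 MB / 100 bytes / 100 ~= 33,500 map entries per process.
 */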
  109 
  110 static int vm_mmap_vnode(struct thread *, vm_size_t, vm_prot_t, vm_prot_t *,
  111     int *, struct vnode *, vm_ooffset_t, vm_object_t *);
  112 static int vm_mmap_cdev(struct thread *, vm_size_t, vm_prot_t, vm_prot_t *,
  113     int *, struct cdev *, vm_ooffset_t, vm_object_t *);
  114 
  115 /*
  116  * MPSAFE
  117  */
  118 /* ARGSUSED */
  119 int
  120 sbrk(td, uap)
  121         struct thread *td;
  122         struct sbrk_args *uap;
  123 {
  124         /* Not yet implemented */
  125         return (EOPNOTSUPP);
  126 }
  127 
  128 #ifndef _SYS_SYSPROTO_H_
  129 struct sstk_args {
  130         int incr;
  131 };
  132 #endif
  133 
  134 /*
  135  * MPSAFE
  136  */
  137 /* ARGSUSED */
  138 int
  139 sstk(td, uap)
  140         struct thread *td;
  141         struct sstk_args *uap;
  142 {
  143         /* Not yet implemented */
  144         return (EOPNOTSUPP);
  145 }
  146 
  147 #if defined(COMPAT_43)
  148 #ifndef _SYS_SYSPROTO_H_
  149 struct getpagesize_args {
  150         int dummy;
  151 };
  152 #endif
  153 
  154 /* ARGSUSED */
  155 int
  156 ogetpagesize(td, uap)
  157         struct thread *td;
  158         struct getpagesize_args *uap;
  159 {
  160         /* MP SAFE */
  161         td->td_retval[0] = PAGE_SIZE;
  162         return (0);
  163 }
  164 #endif                          /* COMPAT_43 */
  165 
  166 
  167 /*
  168  * Memory Map (mmap) system call.  Note that the file offset
  169  * and address are allowed to be NOT page aligned, though if
   170  * the MAP_FIXED flag is set, both must have the same remainder
  171  * modulo the PAGE_SIZE (POSIX 1003.1b).  If the address is not
  172  * page-aligned, the actual mapping starts at trunc_page(addr)
  173  * and the return value is adjusted up by the page offset.
  174  *
  175  * Generally speaking, only character devices which are themselves
  176  * memory-based, such as a video framebuffer, can be mmap'd.  Otherwise
  177  * there would be no cache coherency between a descriptor and a VM mapping
  178  * both to the same character device.
  179  *
  180  * Block devices can be mmap'd no matter what they represent.  Cache coherency
  181  * is maintained as long as you do not write directly to the underlying
  182  * character device.
  183  */
  184 #ifndef _SYS_SYSPROTO_H_
  185 struct mmap_args {
  186         void *addr;
  187         size_t len;
  188         int prot;
  189         int flags;
  190         int fd;
  191         long pad;
  192         off_t pos;
  193 };
  194 #endif
  195 
  196 /*
  197  * MPSAFE
  198  */
  199 int
  200 mmap(td, uap)
  201         struct thread *td;
  202         struct mmap_args *uap;
  203 {
  204         struct file *fp;
  205         struct vnode *vp;
  206         vm_offset_t addr;
  207         vm_size_t size, pageoff;
  208         vm_prot_t prot, maxprot;
  209         void *handle;
  210         objtype_t handle_type;
  211         int flags, error;
  212         off_t pos;
  213         struct vmspace *vms = td->td_proc->p_vmspace;
  214 
  215         addr = (vm_offset_t) uap->addr;
  216         size = uap->len;
  217         prot = uap->prot & VM_PROT_ALL;
  218         flags = uap->flags;
  219         pos = uap->pos;
  220 
  221         fp = NULL;
   222         /* Make sure the mapping fits into the numeric range, etc. */
  223         if ((ssize_t) uap->len < 0 ||
  224             ((flags & MAP_ANON) && uap->fd != -1))
  225                 return (EINVAL);
  226 
  227         if (flags & MAP_STACK) {
  228                 if ((uap->fd != -1) ||
  229                     ((prot & (PROT_READ | PROT_WRITE)) != (PROT_READ | PROT_WRITE)))
  230                         return (EINVAL);
  231                 flags |= MAP_ANON;
  232                 pos = 0;
  233         }
  234 
  235         /*
  236          * Align the file position to a page boundary,
  237          * and save its page offset component.
  238          */
  239         pageoff = (pos & PAGE_MASK);
  240         pos -= pageoff;
  241 
  242         /* Adjust size for rounding (on both ends). */
  243         size += pageoff;                        /* low end... */
  244         size = (vm_size_t) round_page(size);    /* hi end */
  245 
  246         /*
  247          * Check for illegal addresses.  Watch out for address wrap... Note
  248          * that VM_*_ADDRESS are not constants due to casts (argh).
  249          */
  250         if (flags & MAP_FIXED) {
  251                 /*
  252                  * The specified address must have the same remainder
  253                  * as the file offset taken modulo PAGE_SIZE, so it
  254                  * should be aligned after adjustment by pageoff.
  255                  */
  256                 addr -= pageoff;
  257                 if (addr & PAGE_MASK)
  258                         return (EINVAL);
  259                 /* Address range must be all in user VM space. */
  260                 if (addr < vm_map_min(&vms->vm_map) ||
  261                     addr + size > vm_map_max(&vms->vm_map))
  262                         return (EINVAL);
  263                 if (addr + size < addr)
  264                         return (EINVAL);
  265         } else {
   266                 /*
   267                  * XXX for non-fixed mappings where no hint is provided or
   268                  * the hint would fall in the potential heap space,
   269                  * place it after the end of the largest possible heap.
   270                  *
   271                  * There should really be a pmap call to determine a reasonable
   272                  * location.
   273                  */
  274                 PROC_LOCK(td->td_proc);
  275                 if (addr == 0 ||
  276                     (addr >= round_page((vm_offset_t)vms->vm_taddr) &&
  277                     addr < round_page((vm_offset_t)vms->vm_daddr +
  278                     lim_max(td->td_proc, RLIMIT_DATA))))
  279                         addr = round_page((vm_offset_t)vms->vm_daddr +
  280                             lim_max(td->td_proc, RLIMIT_DATA));
  281                 PROC_UNLOCK(td->td_proc);
  282         }
  283         if (flags & MAP_ANON) {
  284                 /*
  285                  * Mapping blank space is trivial.
  286                  */
  287                 handle = NULL;
  288                 handle_type = OBJT_DEFAULT;
  289                 maxprot = VM_PROT_ALL;
  290                 pos = 0;
  291         } else {
  292                 /*
   293                  * Mapping a file: get fp for validation, obtain the vnode,
   294                  * and make sure it is of the appropriate type.  Don't let
   295                  * the descriptor disappear on us if we block.
  296                  */
  297                 if ((error = fget(td, uap->fd, &fp)) != 0)
  298                         goto done;
  299                 if (fp->f_type != DTYPE_VNODE) {
  300                         error = EINVAL;
  301                         goto done;
  302                 }
  303                 /*
  304                  * POSIX shared-memory objects are defined to have
  305                  * kernel persistence, and are not defined to support
  306                  * read(2)/write(2) -- or even open(2).  Thus, we can
  307                  * use MAP_ASYNC to trade on-disk coherence for speed.
  308                  * The shm_open(3) library routine turns on the FPOSIXSHM
  309                  * flag to request this behavior.
  310                  */
  311                 if (fp->f_flag & FPOSIXSHM)
  312                         flags |= MAP_NOSYNC;
  313                 vp = fp->f_vnode;
  314                 /*
  315                  * Ensure that file and memory protections are
  316                  * compatible.  Note that we only worry about
  317                  * writability if mapping is shared; in this case,
  318                  * current and max prot are dictated by the open file.
  319                  * XXX use the vnode instead?  Problem is: what
  320                  * credentials do we use for determination? What if
  321                  * proc does a setuid?
  322                  */
  323                 if (vp->v_mount != NULL && vp->v_mount->mnt_flag & MNT_NOEXEC)
  324                         maxprot = VM_PROT_NONE;
  325                 else
  326                         maxprot = VM_PROT_EXECUTE;
  327                 if (fp->f_flag & FREAD) {
  328                         maxprot |= VM_PROT_READ;
  329                 } else if (prot & PROT_READ) {
  330                         error = EACCES;
  331                         goto done;
  332                 }
  333                 /*
  334                  * If we are sharing potential changes (either via
  335                  * MAP_SHARED or via the implicit sharing of character
  336                  * device mappings), and we are trying to get write
  337                  * permission although we opened it without asking
  338                  * for it, bail out.
  339                  */
  340                 if ((flags & MAP_SHARED) != 0) {
  341                         if ((fp->f_flag & FWRITE) != 0) {
  342                                 maxprot |= VM_PROT_WRITE;
  343                         } else if ((prot & PROT_WRITE) != 0) {
  344                                 error = EACCES;
  345                                 goto done;
  346                         }
  347                 } else if (vp->v_type != VCHR || (fp->f_flag & FWRITE) != 0) {
  348                         maxprot |= VM_PROT_WRITE;
  349                 }
  350                 handle = (void *)vp;
  351                 handle_type = OBJT_VNODE;
  352         }
  353 
  354         /*
   355  * Do not allow more than a certain number of vm_map_entry structures
  356          * per process.  Scale with the number of rforks sharing the map
  357          * to make the limit reasonable for threads.
  358          */
  359         if (max_proc_mmap &&
  360             vms->vm_map.nentries >= max_proc_mmap * vms->vm_refcnt) {
  361                 error = ENOMEM;
  362                 goto done;
  363         }
  364 
  365         error = vm_mmap(&vms->vm_map, &addr, size, prot, maxprot,
  366             flags, handle_type, handle, pos);
  367         if (error == 0)
  368                 td->td_retval[0] = (register_t) (addr + pageoff);
  369 done:
  370         if (fp)
  371                 fdrop(fp, td);
  372 
  373         return (error);
  374 }
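
/*
 * Editor's sketch (userland, not part of this file): exercising the offset
 * handling described in the comment above mmap().  The offset is
 * deliberately not page-aligned; this kernel maps from trunc_page(pos) and
 * returns a pointer adjusted up by the page offset.  The file name
 * "data.bin" is an assumption; it must exist and hold at least off+len
 * bytes.
 */
#include <sys/types.h>
#include <sys/mman.h>
#include <err.h>
#include <fcntl.h>
#include <stdio.h>
#include <unistd.h>

int
main(void)
{
	off_t off = 100;	/* deliberately not page-aligned */
	size_t len = 8192;
	char *p;
	int fd;

	if ((fd = open("data.bin", O_RDONLY)) == -1)
		err(1, "open");
	p = mmap(NULL, len, PROT_READ, MAP_PRIVATE, fd, off);
	if (p == MAP_FAILED)
		err(1, "mmap");
	printf("byte at file offset %ld: 0x%02x\n", (long)off,
	    (unsigned char)p[0]);
	/* munmap() truncates the address itself, so the unaligned p is OK. */
	munmap(p, len);
	close(fd);
	return (0);
}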
  375 
  376 #ifdef COMPAT_43
  377 #ifndef _SYS_SYSPROTO_H_
  378 struct ommap_args {
  379         caddr_t addr;
  380         int len;
  381         int prot;
  382         int flags;
  383         int fd;
  384         long pos;
  385 };
  386 #endif
  387 int
  388 ommap(td, uap)
  389         struct thread *td;
  390         struct ommap_args *uap;
  391 {
  392         struct mmap_args nargs;
  393         static const char cvtbsdprot[8] = {
  394                 0,
  395                 PROT_EXEC,
  396                 PROT_WRITE,
  397                 PROT_EXEC | PROT_WRITE,
  398                 PROT_READ,
  399                 PROT_EXEC | PROT_READ,
  400                 PROT_WRITE | PROT_READ,
  401                 PROT_EXEC | PROT_WRITE | PROT_READ,
  402         };
  403 
  404 #define OMAP_ANON       0x0002
  405 #define OMAP_COPY       0x0020
  406 #define OMAP_SHARED     0x0010
  407 #define OMAP_FIXED      0x0100
  408 
  409         nargs.addr = uap->addr;
  410         nargs.len = uap->len;
  411         nargs.prot = cvtbsdprot[uap->prot & 0x7];
  412         nargs.flags = 0;
  413         if (uap->flags & OMAP_ANON)
  414                 nargs.flags |= MAP_ANON;
  415         if (uap->flags & OMAP_COPY)
  416                 nargs.flags |= MAP_COPY;
  417         if (uap->flags & OMAP_SHARED)
  418                 nargs.flags |= MAP_SHARED;
  419         else
  420                 nargs.flags |= MAP_PRIVATE;
  421         if (uap->flags & OMAP_FIXED)
  422                 nargs.flags |= MAP_FIXED;
  423         nargs.fd = uap->fd;
  424         nargs.pos = uap->pos;
  425         return (mmap(td, &nargs));
  426 }
  427 #endif                          /* COMPAT_43 */
  428 
  429 
  430 #ifndef _SYS_SYSPROTO_H_
  431 struct msync_args {
  432         void *addr;
  433         int len;
  434         int flags;
  435 };
  436 #endif
  437 /*
  438  * MPSAFE
  439  */
  440 int
  441 msync(td, uap)
  442         struct thread *td;
  443         struct msync_args *uap;
  444 {
  445         vm_offset_t addr;
  446         vm_size_t size, pageoff;
  447         int flags;
  448         vm_map_t map;
  449         int rv;
  450 
  451         addr = (vm_offset_t) uap->addr;
  452         size = uap->len;
  453         flags = uap->flags;
  454 
  455         pageoff = (addr & PAGE_MASK);
  456         addr -= pageoff;
  457         size += pageoff;
  458         size = (vm_size_t) round_page(size);
  459         if (addr + size < addr)
  460                 return (EINVAL);
  461 
  462         if ((flags & (MS_ASYNC|MS_INVALIDATE)) == (MS_ASYNC|MS_INVALIDATE))
  463                 return (EINVAL);
  464 
  465         map = &td->td_proc->p_vmspace->vm_map;
  466 
  467         /*
  468          * Clean the pages and interpret the return value.
  469          */
  470         rv = vm_map_sync(map, addr, addr + size, (flags & MS_ASYNC) == 0,
  471             (flags & MS_INVALIDATE) != 0);
  472         switch (rv) {
  473         case KERN_SUCCESS:
  474                 return (0);
  475         case KERN_INVALID_ADDRESS:
  476                 return (EINVAL);        /* Sun returns ENOMEM? */
  477         case KERN_INVALID_ARGUMENT:
  478                 return (EBUSY);
  479         default:
  480                 return (EINVAL);
  481         }
  482 }
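
/*
 * Editor's sketch (userland): flushing a dirty shared mapping with
 * msync().  Note the check above: MS_ASYNC and MS_INVALIDATE may not be
 * combined.  The descriptor fd is assumed to be open read/write.
 */
#include <sys/mman.h>
#include <err.h>
#include <stddef.h>

static void
flush_mapping(int fd, size_t len)
{
	char *p;

	p = mmap(NULL, len, PROT_READ | PROT_WRITE, MAP_SHARED, fd, 0);
	if (p == MAP_FAILED)
		err(1, "mmap");
	p[0] = 'x';				/* dirty the first page */
	if (msync(p, len, MS_SYNC) == -1)	/* synchronous writeback */
		err(1, "msync");
	munmap(p, len);
}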
  483 
  484 #ifndef _SYS_SYSPROTO_H_
  485 struct munmap_args {
  486         void *addr;
  487         size_t len;
  488 };
  489 #endif
  490 /*
  491  * MPSAFE
  492  */
  493 int
  494 munmap(td, uap)
  495         struct thread *td;
  496         struct munmap_args *uap;
  497 {
  498         vm_offset_t addr;
  499         vm_size_t size, pageoff;
  500         vm_map_t map;
  501 
  502         addr = (vm_offset_t) uap->addr;
  503         size = uap->len;
  504         if (size == 0)
  505                 return (EINVAL);
  506 
  507         pageoff = (addr & PAGE_MASK);
  508         addr -= pageoff;
  509         size += pageoff;
  510         size = (vm_size_t) round_page(size);
  511         if (addr + size < addr)
  512                 return (EINVAL);
  513 
  514         /*
  515          * Check for illegal addresses.  Watch out for address wrap...
  516          */
  517         map = &td->td_proc->p_vmspace->vm_map;
  518         if (addr < vm_map_min(map) || addr + size > vm_map_max(map))
  519                 return (EINVAL);
  520         vm_map_lock(map);
  521         /* returns nothing but KERN_SUCCESS anyway */
  522         vm_map_delete(map, addr, addr + size);
  523         vm_map_unlock(map);
  524         return (0);
  525 }
  526 
  527 #ifndef _SYS_SYSPROTO_H_
  528 struct mprotect_args {
  529         const void *addr;
  530         size_t len;
  531         int prot;
  532 };
  533 #endif
  534 /*
  535  * MPSAFE
  536  */
  537 int
  538 mprotect(td, uap)
  539         struct thread *td;
  540         struct mprotect_args *uap;
  541 {
  542         vm_offset_t addr;
  543         vm_size_t size, pageoff;
  544         vm_prot_t prot;
  545 
  546         addr = (vm_offset_t) uap->addr;
  547         size = uap->len;
  548         prot = uap->prot & VM_PROT_ALL;
  549 #if defined(VM_PROT_READ_IS_EXEC)
  550         if (prot & VM_PROT_READ)
  551                 prot |= VM_PROT_EXECUTE;
  552 #endif
  553 
  554         pageoff = (addr & PAGE_MASK);
  555         addr -= pageoff;
  556         size += pageoff;
  557         size = (vm_size_t) round_page(size);
  558         if (addr + size < addr)
  559                 return (EINVAL);
  560 
  561         switch (vm_map_protect(&td->td_proc->p_vmspace->vm_map, addr,
  562             addr + size, prot, FALSE)) {
  563         case KERN_SUCCESS:
  564                 return (0);
  565         case KERN_PROTECTION_FAILURE:
  566                 return (EACCES);
  567         }
  568         return (EINVAL);
  569 }
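
/*
 * Editor's sketch (userland): dropping write permission with mprotect().
 * After the call a store into the page would fault with SIGSEGV;
 * vm_map_protect() above reports KERN_PROTECTION_FAILURE (EACCES) if the
 * request exceeds the region's maximum protection.
 */
#include <sys/mman.h>
#include <err.h>

static void
make_readonly(void)
{
	char *p;

	p = mmap(NULL, 4096, PROT_READ | PROT_WRITE,
	    MAP_ANON | MAP_PRIVATE, -1, 0);
	if (p == MAP_FAILED)
		err(1, "mmap");
	p[0] = 1;
	if (mprotect(p, 4096, PROT_READ) == -1)
		err(1, "mprotect");
	/* p[0] = 2; would now deliver SIGSEGV */
	munmap(p, 4096);
}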
  570 
  571 #ifndef _SYS_SYSPROTO_H_
  572 struct minherit_args {
  573         void *addr;
  574         size_t len;
  575         int inherit;
  576 };
  577 #endif
  578 /*
  579  * MPSAFE
  580  */
  581 int
  582 minherit(td, uap)
  583         struct thread *td;
  584         struct minherit_args *uap;
  585 {
  586         vm_offset_t addr;
  587         vm_size_t size, pageoff;
  588         vm_inherit_t inherit;
  589 
  590         addr = (vm_offset_t)uap->addr;
  591         size = uap->len;
  592         inherit = uap->inherit;
  593 
  594         pageoff = (addr & PAGE_MASK);
  595         addr -= pageoff;
  596         size += pageoff;
  597         size = (vm_size_t) round_page(size);
  598         if (addr + size < addr)
  599                 return (EINVAL);
  600 
  601         switch (vm_map_inherit(&td->td_proc->p_vmspace->vm_map, addr,
  602             addr + size, inherit)) {
  603         case KERN_SUCCESS:
  604                 return (0);
  605         case KERN_PROTECTION_FAILURE:
  606                 return (EACCES);
  607         }
  608         return (EINVAL);
  609 }
  610 
  611 #ifndef _SYS_SYSPROTO_H_
  612 struct madvise_args {
  613         void *addr;
  614         size_t len;
  615         int behav;
  616 };
  617 #endif
  618 
  619 /*
  620  * MPSAFE
  621  */
  622 /* ARGSUSED */
  623 int
  624 madvise(td, uap)
  625         struct thread *td;
  626         struct madvise_args *uap;
  627 {
  628         vm_offset_t start, end;
  629         vm_map_t map;
  630         struct proc *p;
  631         int error;
  632 
  633         /*
  634          * Check for our special case, advising the swap pager we are
  635          * "immortal."
  636          */
  637         if (uap->behav == MADV_PROTECT) {
  638                 error = suser(td);
  639                 if (error == 0) {
  640                         p = td->td_proc;
  641                         PROC_LOCK(p);
  642                         p->p_flag |= P_PROTECTED;
  643                         PROC_UNLOCK(p);
  644                 }
  645                 return (error);
  646         }
  647         /*
  648          * Check for illegal behavior
  649          */
  650         if (uap->behav < 0 || uap->behav > MADV_CORE)
  651                 return (EINVAL);
  652         /*
  653          * Check for illegal addresses.  Watch out for address wrap... Note
  654          * that VM_*_ADDRESS are not constants due to casts (argh).
  655          */
  656         map = &td->td_proc->p_vmspace->vm_map;
  657         if ((vm_offset_t)uap->addr < vm_map_min(map) ||
  658             (vm_offset_t)uap->addr + uap->len > vm_map_max(map))
  659                 return (EINVAL);
  660         if (((vm_offset_t) uap->addr + uap->len) < (vm_offset_t) uap->addr)
  661                 return (EINVAL);
  662 
  663         /*
  664          * Since this routine is only advisory, we default to conservative
  665          * behavior.
  666          */
  667         start = trunc_page((vm_offset_t) uap->addr);
  668         end = round_page((vm_offset_t) uap->addr + uap->len);
  669 
  670         if (vm_map_madvise(map, start, end, uap->behav))
  671                 return (EINVAL);
  672         return (0);
  673 }
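
/*
 * Editor's sketch (userland): madvise() is advisory only, as the comment
 * above notes, so these calls can never make a correct program incorrect,
 * only faster or slower.
 */
#include <sys/mman.h>
#include <stddef.h>

static void
advise_scan(char *p, size_t len)
{
	(void)madvise(p, len, MADV_SEQUENTIAL);	/* expect a linear scan */
	/* ... read through the region ... */
	(void)madvise(p, len, MADV_DONTNEED);	/* pages may be reclaimed */
}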
  674 
  675 #ifndef _SYS_SYSPROTO_H_
  676 struct mincore_args {
  677         const void *addr;
  678         size_t len;
  679         char *vec;
  680 };
  681 #endif
  682 
  683 /*
  684  * MPSAFE
  685  */
  686 /* ARGSUSED */
  687 int
  688 mincore(td, uap)
  689         struct thread *td;
  690         struct mincore_args *uap;
  691 {
  692         vm_offset_t addr, first_addr;
  693         vm_offset_t end, cend;
  694         pmap_t pmap;
  695         vm_map_t map;
  696         char *vec;
  697         int error = 0;
  698         int vecindex, lastvecindex;
  699         vm_map_entry_t current;
  700         vm_map_entry_t entry;
  701         int mincoreinfo;
  702         unsigned int timestamp;
  703 
  704         /*
  705          * Make sure that the addresses presented are valid for user
  706          * mode.
  707          */
  708         first_addr = addr = trunc_page((vm_offset_t) uap->addr);
  709         end = addr + (vm_size_t)round_page(uap->len);
  710         map = &td->td_proc->p_vmspace->vm_map;
  711         if (end > vm_map_max(map) || end < addr)
  712                 return (ENOMEM);
  713 
  714         /*
  715          * Address of byte vector
  716          */
  717         vec = uap->vec;
  718 
  719         pmap = vmspace_pmap(td->td_proc->p_vmspace);
  720 
  721         vm_map_lock_read(map);
  722 RestartScan:
  723         timestamp = map->timestamp;
  724 
  725         if (!vm_map_lookup_entry(map, addr, &entry)) {
  726                 vm_map_unlock_read(map);
  727                 return (ENOMEM);
  728         }
  729 
  730         /*
  731          * Do this on a map entry basis so that if the pages are not
   732          * in the current process's address space, we can easily look
  733          * up the pages elsewhere.
  734          */
  735         lastvecindex = -1;
  736         for (current = entry;
  737             (current != &map->header) && (current->start < end);
  738             current = current->next) {
  739 
  740                 /*
  741                  * check for contiguity
  742                  */
  743                 if (current->end < end &&
   744                      (current->next == &map->header ||
  745                      current->next->start > current->end)) {
  746                         vm_map_unlock_read(map);
  747                         return (ENOMEM);
  748                 }
  749 
  750                 /*
  751                  * ignore submaps (for now) or null objects
  752                  */
  753                 if ((current->eflags & MAP_ENTRY_IS_SUB_MAP) ||
  754                         current->object.vm_object == NULL)
  755                         continue;
  756 
  757                 /*
  758                  * limit this scan to the current map entry and the
  759                  * limits for the mincore call
  760                  */
  761                 if (addr < current->start)
  762                         addr = current->start;
  763                 cend = current->end;
  764                 if (cend > end)
  765                         cend = end;
  766 
  767                 /*
  768                  * scan this entry one page at a time
  769                  */
  770                 while (addr < cend) {
  771                         /*
  772                          * Check pmap first, it is likely faster, also
  773                          * it can provide info as to whether we are the
  774                          * one referencing or modifying the page.
  775                          */
  776                         mincoreinfo = pmap_mincore(pmap, addr);
  777                         if (!mincoreinfo) {
  778                                 vm_pindex_t pindex;
  779                                 vm_ooffset_t offset;
  780                                 vm_page_t m;
  781                                 /*
  782                                  * calculate the page index into the object
  783                                  */
  784                                 offset = current->offset + (addr - current->start);
  785                                 pindex = OFF_TO_IDX(offset);
  786                                 VM_OBJECT_LOCK(current->object.vm_object);
  787                                 m = vm_page_lookup(current->object.vm_object,
  788                                         pindex);
  789                                 /*
  790                                  * if the page is resident, then gather information about
  791                                  * it.
  792                                  */
  793                                 if (m != NULL && m->valid != 0) {
  794                                         mincoreinfo = MINCORE_INCORE;
  795                                         vm_page_lock_queues();
  796                                         if (m->dirty ||
  797                                                 pmap_is_modified(m))
  798                                                 mincoreinfo |= MINCORE_MODIFIED_OTHER;
  799                                         if ((m->flags & PG_REFERENCED) ||
  800                                                 pmap_ts_referenced(m)) {
  801                                                 vm_page_flag_set(m, PG_REFERENCED);
  802                                                 mincoreinfo |= MINCORE_REFERENCED_OTHER;
  803                                         }
  804                                         vm_page_unlock_queues();
  805                                 }
  806                                 VM_OBJECT_UNLOCK(current->object.vm_object);
  807                         }
  808 
  809                         /*
  810                          * subyte may page fault.  In case it needs to modify
  811                          * the map, we release the lock.
  812                          */
  813                         vm_map_unlock_read(map);
  814 
  815                         /*
  816                          * calculate index into user supplied byte vector
  817                          */
  818                         vecindex = OFF_TO_IDX(addr - first_addr);
  819 
  820                         /*
  821                          * If we have skipped map entries, we need to make sure that
  822                          * the byte vector is zeroed for those skipped entries.
  823                          */
  824                         while ((lastvecindex + 1) < vecindex) {
  825                                 error = subyte(vec + lastvecindex, 0);
  826                                 if (error) {
  827                                         error = EFAULT;
  828                                         goto done2;
  829                                 }
  830                                 ++lastvecindex;
  831                         }
  832 
  833                         /*
  834                          * Pass the page information to the user
  835                          */
  836                         error = subyte(vec + vecindex, mincoreinfo);
  837                         if (error) {
  838                                 error = EFAULT;
  839                                 goto done2;
  840                         }
  841 
  842                         /*
  843                          * If the map has changed, due to the subyte, the previous
  844                          * output may be invalid.
  845                          */
  846                         vm_map_lock_read(map);
  847                         if (timestamp != map->timestamp)
  848                                 goto RestartScan;
  849 
  850                         lastvecindex = vecindex;
  851                         addr += PAGE_SIZE;
  852                 }
  853         }
  854 
  855         /*
  856          * subyte may page fault.  In case it needs to modify
  857          * the map, we release the lock.
  858          */
  859         vm_map_unlock_read(map);
  860 
  861         /*
  862          * Zero the last entries in the byte vector.
  863          */
  864         vecindex = OFF_TO_IDX(end - first_addr);
  865         while ((lastvecindex + 1) < vecindex) {
  866                 error = subyte(vec + lastvecindex, 0);
  867                 if (error) {
  868                         error = EFAULT;
  869                         goto done2;
  870                 }
  871                 ++lastvecindex;
  872         }
  873 
  874         /*
  875          * If the map has changed, due to the subyte, the previous
  876          * output may be invalid.
  877          */
  878         vm_map_lock_read(map);
  879         if (timestamp != map->timestamp)
  880                 goto RestartScan;
  881         vm_map_unlock_read(map);
  882 done2:
  883         return (error);
  884 }
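
/*
 * Editor's sketch (userland): counting resident pages with mincore().
 * One status byte is written per page; MINCORE_INCORE is the low bit,
 * with the MODIFIED/REFERENCED bits filled in as gathered above.
 */
#include <sys/mman.h>
#include <err.h>
#include <stdlib.h>
#include <unistd.h>

static size_t
resident_pages(const void *addr, size_t len)
{
	size_t i, n, resident;
	long pgsz;
	char *vec;

	pgsz = sysconf(_SC_PAGESIZE);
	n = (len + pgsz - 1) / pgsz;
	if ((vec = malloc(n)) == NULL)
		err(1, "malloc");
	if (mincore(addr, len, vec) == -1)
		err(1, "mincore");
	resident = 0;
	for (i = 0; i < n; i++)
		if (vec[i] & MINCORE_INCORE)
			resident++;
	free(vec);
	return (resident);
}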
  885 
  886 #ifndef _SYS_SYSPROTO_H_
  887 struct mlock_args {
  888         const void *addr;
  889         size_t len;
  890 };
  891 #endif
  892 /*
  893  * MPSAFE
  894  */
  895 int
  896 mlock(td, uap)
  897         struct thread *td;
  898         struct mlock_args *uap;
  899 {
  900         struct proc *proc;
  901         vm_offset_t addr, end, last, start;
  902         vm_size_t npages, size;
  903         int error;
  904 
  905         error = suser(td);
  906         if (error)
  907                 return (error);
  908         addr = (vm_offset_t)uap->addr;
  909         size = uap->len;
  910         last = addr + size;
  911         start = trunc_page(addr);
  912         end = round_page(last);
  913         if (last < addr || end < addr)
  914                 return (EINVAL);
  915         npages = atop(end - start);
  916         if (npages > vm_page_max_wired)
  917                 return (ENOMEM);
  918         proc = td->td_proc;
  919         PROC_LOCK(proc);
  920         if (ptoa(npages +
  921             pmap_wired_count(vm_map_pmap(&proc->p_vmspace->vm_map))) >
  922             lim_cur(proc, RLIMIT_MEMLOCK)) {
  923                 PROC_UNLOCK(proc);
  924                 return (ENOMEM);
  925         }
  926         PROC_UNLOCK(proc);
  927         if (npages + cnt.v_wire_count > vm_page_max_wired)
  928                 return (EAGAIN);
  929         error = vm_map_wire(&proc->p_vmspace->vm_map, start, end,
  930             VM_MAP_WIRE_USER | VM_MAP_WIRE_NOHOLES);
  931         return (error == KERN_SUCCESS ? 0 : ENOMEM);
  932 }
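
/*
 * Editor's sketch (userland): wiring a buffer holding sensitive data so it
 * cannot be paged out.  On this kernel mlock() requires superuser
 * privilege (the suser() check above) and is bounded by RLIMIT_MEMLOCK and
 * vm_page_max_wired.
 */
#include <sys/mman.h>
#include <err.h>
#include <string.h>

static void
with_wired_key(char *key, size_t len)
{
	if (mlock(key, len) == -1)
		err(1, "mlock");
	/* ... use the key while it is guaranteed resident ... */
	memset(key, 0, len);
	if (munlock(key, len) == -1)
		err(1, "munlock");
}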
  933 
  934 #ifndef _SYS_SYSPROTO_H_
  935 struct mlockall_args {
  936         int     how;
  937 };
  938 #endif
  939 
  940 /*
  941  * MPSAFE
  942  */
  943 int
  944 mlockall(td, uap)
  945         struct thread *td;
  946         struct mlockall_args *uap;
  947 {
  948         vm_map_t map;
  949         int error;
  950 
  951         map = &td->td_proc->p_vmspace->vm_map;
  952         error = 0;
  953 
  954         if ((uap->how == 0) || ((uap->how & ~(MCL_CURRENT|MCL_FUTURE)) != 0))
  955                 return (EINVAL);
  956 
  957 #if 0
  958         /*
  959          * If wiring all pages in the process would cause it to exceed
  960          * a hard resource limit, return ENOMEM.
  961          */
  962         PROC_LOCK(td->td_proc);
   963         if (map->size - ptoa(pmap_wired_count(vm_map_pmap(map))) >
   964             lim_cur(td->td_proc, RLIMIT_MEMLOCK)) {
  965                 PROC_UNLOCK(td->td_proc);
  966                 return (ENOMEM);
  967         }
  968         PROC_UNLOCK(td->td_proc);
  969 #else
  970         error = suser(td);
  971         if (error)
  972                 return (error);
  973 #endif
  974 
  975         if (uap->how & MCL_FUTURE) {
  976                 vm_map_lock(map);
  977                 vm_map_modflags(map, MAP_WIREFUTURE, 0);
  978                 vm_map_unlock(map);
  979                 error = 0;
  980         }
  981 
  982         if (uap->how & MCL_CURRENT) {
  983                 /*
  984                  * P1003.1-2001 mandates that all currently mapped pages
  985                  * will be memory resident and locked (wired) upon return
  986                  * from mlockall(). vm_map_wire() will wire pages, by
  987                  * calling vm_fault_wire() for each page in the region.
  988                  */
  989                 error = vm_map_wire(map, vm_map_min(map), vm_map_max(map),
  990                     VM_MAP_WIRE_USER|VM_MAP_WIRE_HOLESOK);
  991                 error = (error == KERN_SUCCESS ? 0 : EAGAIN);
  992         }
  993 
  994         return (error);
  995 }
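
/*
 * Editor's sketch (userland): a latency-sensitive process wiring its whole
 * address space.  MCL_FUTURE sets MAP_WIREFUTURE on the map, so later
 * mappings are wired as well (see the MAP_WIREFUTURE check near the end of
 * vm_mmap() below).
 */
#include <sys/mman.h>
#include <err.h>

static void
go_memory_resident(void)
{
	if (mlockall(MCL_CURRENT | MCL_FUTURE) == -1)
		err(1, "mlockall");
}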
  996 
  997 #ifndef _SYS_SYSPROTO_H_
  998 struct munlockall_args {
  999         register_t dummy;
 1000 };
 1001 #endif
 1002 
 1003 /*
 1004  * MPSAFE
 1005  */
 1006 int
 1007 munlockall(td, uap)
 1008         struct thread *td;
 1009         struct munlockall_args *uap;
 1010 {
 1011         vm_map_t map;
 1012         int error;
 1013 
 1014         map = &td->td_proc->p_vmspace->vm_map;
 1015         error = suser(td);
 1016         if (error)
 1017                 return (error);
 1018 
 1019         /* Clear the MAP_WIREFUTURE flag from this vm_map. */
 1020         vm_map_lock(map);
 1021         vm_map_modflags(map, 0, MAP_WIREFUTURE);
 1022         vm_map_unlock(map);
 1023 
 1024         /* Forcibly unwire all pages. */
 1025         error = vm_map_unwire(map, vm_map_min(map), vm_map_max(map),
 1026             VM_MAP_WIRE_USER|VM_MAP_WIRE_HOLESOK);
 1027 
 1028         return (error);
 1029 }
 1030 
 1031 #ifndef _SYS_SYSPROTO_H_
 1032 struct munlock_args {
 1033         const void *addr;
 1034         size_t len;
 1035 };
 1036 #endif
 1037 /*
 1038  * MPSAFE
 1039  */
 1040 int
 1041 munlock(td, uap)
 1042         struct thread *td;
 1043         struct munlock_args *uap;
 1044 {
 1045         vm_offset_t addr, end, last, start;
 1046         vm_size_t size;
 1047         int error;
 1048 
 1049         error = suser(td);
 1050         if (error)
 1051                 return (error);
 1052         addr = (vm_offset_t)uap->addr;
 1053         size = uap->len;
 1054         last = addr + size;
 1055         start = trunc_page(addr);
 1056         end = round_page(last);
 1057         if (last < addr || end < addr)
 1058                 return (EINVAL);
 1059         error = vm_map_unwire(&td->td_proc->p_vmspace->vm_map, start, end,
 1060             VM_MAP_WIRE_USER | VM_MAP_WIRE_NOHOLES);
 1061         return (error == KERN_SUCCESS ? 0 : ENOMEM);
 1062 }
 1063 
 1064 /*
 1065  * vm_mmap_vnode()
 1066  *
 1067  * MPSAFE
 1068  *
 1069  * Helper function for vm_mmap.  Perform sanity check specific for mmap
 1070  * operations on vnodes.
 1071  */
 1072 int
 1073 vm_mmap_vnode(struct thread *td, vm_size_t objsize,
 1074     vm_prot_t prot, vm_prot_t *maxprotp, int *flagsp,
 1075     struct vnode *vp, vm_ooffset_t foff, vm_object_t *objp)
 1076 {
 1077         struct vattr va;
 1078         void *handle;
 1079         vm_object_t obj;
 1080         struct mount *mp;
 1081         struct cdevsw *dsw;
 1082         int error, flags, type;
 1083         int vfslocked;
 1084 
 1085         mp = vp->v_mount;
 1086         vfslocked = VFS_LOCK_GIANT(mp);
 1087         if ((error = vget(vp, LK_EXCLUSIVE, td)) != 0) {
 1088                 VFS_UNLOCK_GIANT(vfslocked);
 1089                 return (error);
 1090         }
 1091         flags = *flagsp;
 1092         obj = vp->v_object;
 1093         if (vp->v_type == VREG) {
 1094                 /*
 1095                  * Get the proper underlying object
 1096                  */
 1097                 if (obj == NULL) {
 1098                         error = EINVAL;
 1099                         goto done;
 1100                 }
 1101                 if (obj->handle != vp) {
 1102                         vput(vp);
 1103                         vp = (struct vnode*)obj->handle;
 1104                         vget(vp, LK_EXCLUSIVE, td);
 1105                 }
 1106                 type = OBJT_VNODE;
 1107                 handle = vp;
 1108         } else if (vp->v_type == VCHR) {
 1109                 type = OBJT_DEVICE;
 1110                 handle = vp->v_rdev;
 1111 
 1112                 dsw = dev_refthread(handle);
 1113                 if (dsw == NULL) {
 1114                         error = ENXIO;
 1115                         goto done;
 1116                 }
 1117                 if (dsw->d_flags & D_MMAP_ANON) {
 1118                         dev_relthread(handle);
 1119                         *maxprotp = VM_PROT_ALL;
 1120                         *flagsp |= MAP_ANON;
 1121                         error = 0;
 1122                         goto done;
 1123                 }
 1124                 dev_relthread(handle);
 1125                 /*
  1126          * cdevs do not provide private mappings of any kind.
 1127                  */
 1128                 if ((*maxprotp & VM_PROT_WRITE) == 0 &&
 1129                     (prot & PROT_WRITE) != 0) {
 1130                         error = EACCES;
 1131                         goto done;
 1132                 }
 1133                 if (flags & (MAP_PRIVATE|MAP_COPY)) {
 1134                         error = EINVAL;
 1135                         goto done;
 1136                 }
 1137                 /*
 1138                  * Force device mappings to be shared.
 1139                  */
 1140                 flags |= MAP_SHARED;
 1141         } else {
 1142                 error = EINVAL;
 1143                 goto done;
 1144         }
 1145         if ((error = VOP_GETATTR(vp, &va, td->td_ucred, td))) {
 1146                 goto done;
 1147         }
 1148 #ifdef MAC
 1149         error = mac_check_vnode_mmap(td->td_ucred, vp, prot, flags);
 1150         if (error != 0)
 1151                 goto done;
 1152 #endif
 1153         if ((flags & MAP_SHARED) != 0) {
 1154                 if ((va.va_flags & (SF_SNAPSHOT|IMMUTABLE|APPEND)) != 0) {
 1155                         if (prot & PROT_WRITE) {
 1156                                 error = EPERM;
 1157                                 goto done;
 1158                         }
 1159                         *maxprotp &= ~VM_PROT_WRITE;
 1160                 }
 1161         }
 1162         /*
  1163          * If it is a regular file with no remaining links
  1164          * (i.e., it has been unlinked), we do not need to sync it.
  1165          * Adjust the object size to be the size of the actual file.
 1166          */
 1167         if (vp->v_type == VREG) {
 1168                 objsize = round_page(va.va_size);
 1169                 if (va.va_nlink == 0)
 1170                         flags |= MAP_NOSYNC;
 1171         }
 1172         obj = vm_pager_allocate(type, handle, objsize, prot, foff);
 1173         if (obj == NULL) {
 1174                 error = (type == OBJT_DEVICE ? EINVAL : ENOMEM);
 1175                 goto done;
 1176         }
 1177         *objp = obj;
 1178         *flagsp = flags;
 1179         vfs_mark_atime(vp, td);
 1180 
 1181 done:
 1182         vput(vp);
 1183         VFS_UNLOCK_GIANT(vfslocked);
 1184         return (error);
 1185 }
 1186 
 1187 /*
 1188  * vm_mmap_cdev()
 1189  *
 1190  * MPSAFE
 1191  *
 1192  * Helper function for vm_mmap.  Perform sanity check specific for mmap
 1193  * operations on cdevs.
 1194  */
 1195 int
 1196 vm_mmap_cdev(struct thread *td, vm_size_t objsize,
 1197     vm_prot_t prot, vm_prot_t *maxprotp, int *flagsp,
 1198     struct cdev *cdev, vm_ooffset_t foff, vm_object_t *objp)
 1199 {
 1200         vm_object_t obj;
 1201         struct cdevsw *dsw;
 1202         int flags;
 1203 
 1204         flags = *flagsp;
 1205 
 1206         dsw = dev_refthread(cdev);
 1207         if (dsw == NULL)
 1208                 return (ENXIO);
 1209         if (dsw->d_flags & D_MMAP_ANON) {
 1210                 dev_relthread(cdev);
 1211                 *maxprotp = VM_PROT_ALL;
 1212                 *flagsp |= MAP_ANON;
 1213                 return (0);
 1214         }
 1215         dev_relthread(cdev);
 1216         /*
  1217          * cdevs do not provide private mappings of any kind.
 1218          */
 1219         if ((*maxprotp & VM_PROT_WRITE) == 0 &&
 1220             (prot & PROT_WRITE) != 0)
 1221                 return (EACCES);
 1222         if (flags & (MAP_PRIVATE|MAP_COPY))
 1223                 return (EINVAL);
 1224         /*
 1225          * Force device mappings to be shared.
 1226          */
 1227         flags |= MAP_SHARED;
 1228 #ifdef MAC_XXX
 1229         error = mac_check_cdev_mmap(td->td_ucred, cdev, prot);
 1230         if (error != 0)
 1231                 return (error);
 1232 #endif
 1233         obj = vm_pager_allocate(OBJT_DEVICE, cdev, objsize, prot, foff);
 1234         if (obj == NULL)
 1235                 return (EINVAL);
 1236         *objp = obj;
 1237         *flagsp = flags;
 1238         return (0);
 1239 }
 1240 
 1241 /*
 1242  * vm_mmap()
 1243  *
 1244  * MPSAFE
 1245  *
  1246  * Internal version of mmap.  Currently used by mmap, exec, and System V
  1247  * shared memory.  Handle is either a vnode pointer or NULL for MAP_ANON.
 1248  */
 1249 int
 1250 vm_mmap(vm_map_t map, vm_offset_t *addr, vm_size_t size, vm_prot_t prot,
 1251         vm_prot_t maxprot, int flags,
 1252         objtype_t handle_type, void *handle,
 1253         vm_ooffset_t foff)
 1254 {
 1255         boolean_t fitit;
 1256         vm_object_t object;
 1257         int rv = KERN_SUCCESS;
 1258         int docow, error;
 1259         struct thread *td = curthread;
 1260 
 1261         if (size == 0)
 1262                 return (0);
 1263 
 1264         size = round_page(size);
 1265 
 1266         PROC_LOCK(td->td_proc);
 1267         if (td->td_proc->p_vmspace->vm_map.size + size >
 1268             lim_cur(td->td_proc, RLIMIT_VMEM)) {
 1269                 PROC_UNLOCK(td->td_proc);
 1270                 return(ENOMEM);
 1271         }
 1272         PROC_UNLOCK(td->td_proc);
 1273 
 1274         /*
 1275          * We currently can only deal with page aligned file offsets.
 1276          * The check is here rather than in the syscall because the
  1277          * kernel calls this function internally for other mmap
  1278          * operations (such as in exec), and non-aligned offsets will
 1279          * cause pmap inconsistencies...so we want to be sure to
 1280          * disallow this in all cases.
 1281          */
 1282         if (foff & PAGE_MASK)
 1283                 return (EINVAL);
 1284 
 1285         if ((flags & MAP_FIXED) == 0) {
 1286                 fitit = TRUE;
 1287                 *addr = round_page(*addr);
 1288         } else {
 1289                 if (*addr != trunc_page(*addr))
 1290                         return (EINVAL);
 1291                 fitit = FALSE;
 1292         }
 1293         /*
 1294          * Lookup/allocate object.
 1295          */
 1296         switch (handle_type) {
 1297         case OBJT_DEVICE:
 1298                 error = vm_mmap_cdev(td, size, prot, &maxprot, &flags,
 1299                     handle, foff, &object);
 1300                 break;
 1301         case OBJT_VNODE:
 1302                 error = vm_mmap_vnode(td, size, prot, &maxprot, &flags,
 1303                     handle, foff, &object);
 1304                 break;
 1305         case OBJT_DEFAULT:
 1306                 if (handle == NULL) {
 1307                         error = 0;
 1308                         break;
 1309                 }
 1310                 /* FALLTHROUGH */
 1311         default:
 1312                 error = EINVAL;
 1313         }
 1314         if (error)
 1315                 return (error);
 1316         if (flags & MAP_ANON) {
 1317                 object = NULL;
 1318                 docow = 0;
 1319                 /*
 1320                  * Unnamed anonymous regions always start at 0.
 1321                  */
 1322                 if (handle == 0)
 1323                         foff = 0;
 1324         } else {
 1325                 docow = MAP_PREFAULT_PARTIAL;
 1326         }
 1327 
 1328         if ((flags & (MAP_ANON|MAP_SHARED)) == 0)
 1329                 docow |= MAP_COPY_ON_WRITE;
 1330         if (flags & MAP_NOSYNC)
 1331                 docow |= MAP_DISABLE_SYNCER;
 1332         if (flags & MAP_NOCORE)
 1333                 docow |= MAP_DISABLE_COREDUMP;
 1334 
 1335 #if defined(VM_PROT_READ_IS_EXEC)
 1336         if (prot & VM_PROT_READ)
 1337                 prot |= VM_PROT_EXECUTE;
 1338 
 1339         if (maxprot & VM_PROT_READ)
 1340                 maxprot |= VM_PROT_EXECUTE;
 1341 #endif
 1342 
 1343         if (fitit)
 1344                 *addr = pmap_addr_hint(object, *addr, size);
 1345 
 1346         if (flags & MAP_STACK)
 1347                 rv = vm_map_stack(map, *addr, size, prot, maxprot,
 1348                     docow | MAP_STACK_GROWS_DOWN);
 1349         else if (fitit)
 1350                 rv = vm_map_find(map, object, foff, addr, size, TRUE,
 1351                                  prot, maxprot, docow);
 1352         else
 1353                 rv = vm_map_fixed(map, object, foff, addr, size,
 1354                                  prot, maxprot, docow);
 1355 
 1356         if (rv != KERN_SUCCESS) {
 1357                 /*
 1358                  * Lose the object reference. Will destroy the
 1359                  * object if it's an unnamed anonymous mapping
 1360                  * or named anonymous without other references.
 1361                  */
 1362                 vm_object_deallocate(object);
 1363         } else if (flags & MAP_SHARED) {
 1364                 /*
 1365                  * Shared memory is also shared with children.
 1366                  */
 1367                 rv = vm_map_inherit(map, *addr, *addr + size, VM_INHERIT_SHARE);
 1368                 if (rv != KERN_SUCCESS)
 1369                         (void) vm_map_remove(map, *addr, *addr + size);
 1370         }
 1371 
 1372         /*
 1373          * If the process has requested that all future mappings
 1374          * be wired, then heed this.
 1375          */
 1376         if ((rv == KERN_SUCCESS) && (map->flags & MAP_WIREFUTURE))
 1377                 vm_map_wire(map, *addr, *addr + size,
 1378                     VM_MAP_WIRE_USER|VM_MAP_WIRE_NOHOLES);
 1379 
 1380         switch (rv) {
 1381         case KERN_SUCCESS:
 1382                 return (0);
 1383         case KERN_INVALID_ADDRESS:
 1384         case KERN_NO_SPACE:
 1385                 return (ENOMEM);
 1386         case KERN_PROTECTION_FAILURE:
 1387                 return (EACCES);
 1388         default:
 1389                 return (EINVAL);
 1390         }
 1391 }
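
/*
 * Editor's sketch (kernel side, illustrative only; not from this file):
 * roughly how an in-kernel caller might map anonymous memory through
 * vm_mmap(), mirroring the values the mmap() syscall above passes for
 * MAP_ANON (handle NULL, type OBJT_DEFAULT, maxprot VM_PROT_ALL).
 */
static int
map_anon_sketch(vm_map_t map, vm_offset_t *addr, vm_size_t size)
{

	return (vm_mmap(map, addr, size, VM_PROT_ALL, VM_PROT_ALL,
	    MAP_ANON, OBJT_DEFAULT, NULL, 0));
}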
