FreeBSD/Linux Kernel Cross Reference
sys/vm/vm_mmap.c

/*-
 * SPDX-License-Identifier: BSD-3-Clause
 *
 * Copyright (c) 1988 University of Utah.
 * Copyright (c) 1991, 1993
 *      The Regents of the University of California.  All rights reserved.
 *
 * This code is derived from software contributed to Berkeley by
 * the Systems Programming Group of the University of Utah Computer
 * Science Department.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. Neither the name of the University nor the names of its contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 * from: Utah $Hdr: vm_mmap.c 1.6 91/10/21$
 *
 *      @(#)vm_mmap.c   8.4 (Berkeley) 1/12/94
 */

/*
 * Mapped file (mmap) interface to VM
 */

#include <sys/cdefs.h>
__FBSDID("$FreeBSD$");

#include "opt_hwpmc_hooks.h"
#include "opt_vm.h"

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/capsicum.h>
#include <sys/kernel.h>
#include <sys/lock.h>
#include <sys/mutex.h>
#include <sys/sysproto.h>
#include <sys/filedesc.h>
#include <sys/priv.h>
#include <sys/proc.h>
#include <sys/procctl.h>
#include <sys/racct.h>
#include <sys/resource.h>
#include <sys/resourcevar.h>
#include <sys/rwlock.h>
#include <sys/sysctl.h>
#include <sys/vnode.h>
#include <sys/fcntl.h>
#include <sys/file.h>
#include <sys/mman.h>
#include <sys/mount.h>
#include <sys/conf.h>
#include <sys/stat.h>
#include <sys/syscallsubr.h>
#include <sys/sysent.h>
#include <sys/vmmeter.h>

#include <security/audit/audit.h>
#include <security/mac/mac_framework.h>

#include <vm/vm.h>
#include <vm/vm_param.h>
#include <vm/pmap.h>
#include <vm/vm_map.h>
#include <vm/vm_object.h>
#include <vm/vm_page.h>
#include <vm/vm_pager.h>
#include <vm/vm_pageout.h>
#include <vm/vm_extern.h>
#include <vm/vnode_pager.h>

#ifdef HWPMC_HOOKS
#include <sys/pmckern.h>
#endif

int old_mlock = 0;
SYSCTL_INT(_vm, OID_AUTO, old_mlock, CTLFLAG_RWTUN, &old_mlock, 0,
    "Do not apply RLIMIT_MEMLOCK on mlockall");
static int mincore_mapped = 1;
SYSCTL_INT(_vm, OID_AUTO, mincore_mapped, CTLFLAG_RWTUN, &mincore_mapped, 0,
    "mincore reports mappings, not residency");
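/*
 * Both knobs live under the vm sysctl tree; e.g. "sysctl vm.old_mlock=1"
 * (illustrative usage, not from the original file) restores the historic
 * mlockall() behavior of skipping the RLIMIT_MEMLOCK check.
 */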

#ifdef MAP_32BIT
#define MAP_32BIT_MAX_ADDR      ((vm_offset_t)1 << 31)
#endif

#ifndef _SYS_SYSPROTO_H_
struct sbrk_args {
        int incr;
};
#endif

int
sys_sbrk(struct thread *td, struct sbrk_args *uap)
{
        /* Not yet implemented */
        return (EOPNOTSUPP);
}

#ifndef _SYS_SYSPROTO_H_
struct sstk_args {
        int incr;
};
#endif

int
sys_sstk(struct thread *td, struct sstk_args *uap)
{
        /* Not yet implemented */
        return (EOPNOTSUPP);
}

#if defined(COMPAT_43)
#ifndef _SYS_SYSPROTO_H_
struct getpagesize_args {
        int dummy;
};
#endif

int
ogetpagesize(struct thread *td, struct getpagesize_args *uap)
{

        td->td_retval[0] = PAGE_SIZE;
        return (0);
}
#endif                          /* COMPAT_43 */


/*
 * Memory Map (mmap) system call.  Note that the file offset
 * and address are allowed to be NOT page aligned, though if
 * the MAP_FIXED flag is set, both must have the same remainder
 * modulo the PAGE_SIZE (POSIX 1003.1b).  If the address is not
 * page-aligned, the actual mapping starts at trunc_page(addr)
 * and the return value is adjusted up by the page offset.
 *
 * Generally speaking, only character devices which are themselves
 * memory-based, such as a video framebuffer, can be mmap'd.  Otherwise
 * there would be no cache coherency between a descriptor and a VM mapping
 * both to the same character device.
 */
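/*
 * Illustrative example (not from the original file): with 4 KB pages,
 * mmap(NULL, 100, PROT_READ, MAP_PRIVATE, fd, 0x1234) maps starting at
 * file offset trunc_page(0x1234) == 0x1000 and returns the selected
 * page-aligned address plus the 0x234 page offset, so the caller's
 * pointer addresses the byte at offset 0x1234.
 */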
#ifndef _SYS_SYSPROTO_H_
struct mmap_args {
        void *addr;
        size_t len;
        int prot;
        int flags;
        int fd;
        long pad;
        off_t pos;
};
#endif

int
sys_mmap(struct thread *td, struct mmap_args *uap)
{

        return (kern_mmap(td, (uintptr_t)uap->addr, uap->len, uap->prot,
            uap->flags, uap->fd, uap->pos));
}

int
kern_mmap(struct thread *td, uintptr_t addr0, size_t size, int prot, int flags,
    int fd, off_t pos)
{

        return (kern_mmap_fpcheck(td, addr0, size, prot, flags, fd, pos, NULL));
}

/*
 * When mmap'ing a file, check_fp_fn may be used by the caller to perform
 * any last-minute validation based on the referenced file in a non-racy
 * way.
 */
int
kern_mmap_fpcheck(struct thread *td, uintptr_t addr0, size_t size, int prot,
    int flags, int fd, off_t pos, mmap_check_fp_fn check_fp_fn)
{
        struct vmspace *vms;
        struct file *fp;
        struct proc *p;
        vm_offset_t addr;
        vm_size_t pageoff;
        vm_prot_t cap_maxprot;
        int align, error;
        cap_rights_t rights;

        p = td->td_proc;
        vms = p->p_vmspace;
        fp = NULL;
        AUDIT_ARG_FD(fd);
        addr = addr0;

        /*
         * Ignore old flags that used to be defined but did not do anything.
         */
        flags &= ~(MAP_RESERVED0020 | MAP_RESERVED0040);

        /*
         * Enforce the constraints.
         * Mapping of length 0 is only allowed for old binaries.
         * An anonymous mapping shall specify -1 as the file descriptor
         * and zero position for new code. Be nice to ancient a.out
         * binaries and correct pos for anonymous mappings, since old
         * ld.so sometimes issues anonymous map requests with non-zero
         * pos.
         */
        if (!SV_CURPROC_FLAG(SV_AOUT)) {
                if ((size == 0 && p->p_osrel >= P_OSREL_MAP_ANON) ||
                    ((flags & MAP_ANON) != 0 && (fd != -1 || pos != 0)))
                        return (EINVAL);
        } else {
                if ((flags & MAP_ANON) != 0)
                        pos = 0;
        }

        if (flags & MAP_STACK) {
                if ((fd != -1) ||
                    ((prot & (PROT_READ | PROT_WRITE)) != (PROT_READ | PROT_WRITE)))
                        return (EINVAL);
                flags |= MAP_ANON;
                pos = 0;
        }
        if ((flags & ~(MAP_SHARED | MAP_PRIVATE | MAP_FIXED | MAP_HASSEMAPHORE |
            MAP_STACK | MAP_NOSYNC | MAP_ANON | MAP_EXCL | MAP_NOCORE |
            MAP_PREFAULT_READ | MAP_GUARD |
#ifdef MAP_32BIT
            MAP_32BIT |
#endif
            MAP_ALIGNMENT_MASK)) != 0)
                return (EINVAL);
        if ((flags & (MAP_EXCL | MAP_FIXED)) == MAP_EXCL)
                return (EINVAL);
        if ((flags & (MAP_SHARED | MAP_PRIVATE)) == (MAP_SHARED | MAP_PRIVATE))
                return (EINVAL);
        if (prot != PROT_NONE &&
            (prot & ~(PROT_READ | PROT_WRITE | PROT_EXEC)) != 0)
                return (EINVAL);
        if ((flags & MAP_GUARD) != 0 && (prot != PROT_NONE || fd != -1 ||
            pos != 0 || (flags & ~(MAP_FIXED | MAP_GUARD | MAP_EXCL |
#ifdef MAP_32BIT
            MAP_32BIT |
#endif
            MAP_ALIGNMENT_MASK)) != 0))
                return (EINVAL);
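        /*
         * Illustrative MAP_GUARD usage (not from the original file):
         * mmap(NULL, len, PROT_NONE, MAP_GUARD, -1, 0) reserves a range
         * that is never backed by memory and faults on any access, as
         * validated by the check above (PROT_NONE, fd -1, pos 0).
         */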

        /*
         * Align the file position to a page boundary,
         * and save its page offset component.
         */
        pageoff = (pos & PAGE_MASK);
        pos -= pageoff;

        /* Adjust size for rounding (on both ends). */
        size += pageoff;                        /* low end... */
        size = (vm_size_t) round_page(size);    /* hi end */

        /* Ensure alignment is at least a page and fits in a pointer. */
        align = flags & MAP_ALIGNMENT_MASK;
        if (align != 0 && align != MAP_ALIGNED_SUPER &&
            (align >> MAP_ALIGNMENT_SHIFT >= sizeof(void *) * NBBY ||
            align >> MAP_ALIGNMENT_SHIFT < PAGE_SHIFT))
                return (EINVAL);
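        /*
         * Encoding note (assumption based on sys/mman.h): MAP_ALIGNED(n)
         * stores the log2 of the requested alignment in the bits above
         * MAP_ALIGNMENT_SHIFT, so MAP_ALIGNED(21) would request a 2 MB
         * aligned mapping, while MAP_ALIGNED_SUPER lets the kernel pick
         * a superpage alignment.
         */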

        /*
         * Check for illegal addresses.  Watch out for address wrap... Note
         * that VM_*_ADDRESS are not constants due to casts (argh).
         */
        if (flags & MAP_FIXED) {
                /*
                 * The specified address must have the same remainder
                 * as the file offset taken modulo PAGE_SIZE, so it
                 * should be aligned after adjustment by pageoff.
                 */
                addr -= pageoff;
                if (addr & PAGE_MASK)
                        return (EINVAL);

                /* Address range must be all in user VM space. */
                if (!vm_map_range_valid(&vms->vm_map, addr, addr + size))
                        return (EINVAL);
#ifdef MAP_32BIT
                if (flags & MAP_32BIT && addr + size > MAP_32BIT_MAX_ADDR)
                        return (EINVAL);
        } else if (flags & MAP_32BIT) {
                /*
                 * For MAP_32BIT, override the hint if it is too high and
                 * do not bother moving the mapping past the heap (since
                 * the heap is usually above 2GB).
                 */
                if (addr + size > MAP_32BIT_MAX_ADDR)
                        addr = 0;
#endif
        } else {
                /*
                 * XXX for non-fixed mappings where no hint is provided or
                 * the hint would fall in the potential heap space,
                 * place it after the end of the largest possible heap.
                 *
                 * There should really be a pmap call to determine a reasonable
                 * location.
                 */
                if (addr == 0 ||
                    (addr >= round_page((vm_offset_t)vms->vm_taddr) &&
                    addr < round_page((vm_offset_t)vms->vm_daddr +
                    lim_max(td, RLIMIT_DATA))))
                        addr = round_page((vm_offset_t)vms->vm_daddr +
                            lim_max(td, RLIMIT_DATA));
        }
        if (size == 0) {
                /*
                 * Return success without mapping anything for old
                 * binaries that request a page-aligned mapping of
                 * length 0.  For modern binaries, this function
                 * returns an error earlier.
                 */
                error = 0;
        } else if ((flags & MAP_GUARD) != 0) {
                error = vm_mmap_object(&vms->vm_map, &addr, size, VM_PROT_NONE,
                    VM_PROT_NONE, flags, NULL, pos, FALSE, td);
        } else if ((flags & MAP_ANON) != 0) {
                /*
                 * Mapping blank space is trivial.
                 *
                 * This relies on VM_PROT_* matching PROT_*.
                 */
                error = vm_mmap_object(&vms->vm_map, &addr, size, prot,
                    VM_PROT_ALL, flags, NULL, pos, FALSE, td);
        } else {
                /*
                 * Mapping file, get fp for validation and don't let the
                 * descriptor disappear on us if we block. Check capability
                 * rights, but also return the maximum rights to be combined
                 * with maxprot later.
                 */
                cap_rights_init(&rights, CAP_MMAP);
                if (prot & PROT_READ)
                        cap_rights_set(&rights, CAP_MMAP_R);
                if ((flags & MAP_SHARED) != 0) {
                        if (prot & PROT_WRITE)
                                cap_rights_set(&rights, CAP_MMAP_W);
                }
                if (prot & PROT_EXEC)
                        cap_rights_set(&rights, CAP_MMAP_X);
                error = fget_mmap(td, fd, &rights, &cap_maxprot, &fp);
                if (error != 0)
                        goto done;
                if ((flags & (MAP_SHARED | MAP_PRIVATE)) == 0 &&
                    p->p_osrel >= P_OSREL_MAP_FSTRICT) {
                        error = EINVAL;
                        goto done;
                }
                if (check_fp_fn != NULL) {
                        error = check_fp_fn(fp, prot, cap_maxprot, flags);
                        if (error != 0)
                                goto done;
                }
                /* This relies on VM_PROT_* matching PROT_*. */
                error = fo_mmap(fp, &vms->vm_map, &addr, size, prot,
                    cap_maxprot, flags, pos, td);
        }

        if (error == 0)
                td->td_retval[0] = (register_t) (addr + pageoff);
done:
        if (fp)
                fdrop(fp, td);

        return (error);
}

#if defined(COMPAT_FREEBSD6)
int
freebsd6_mmap(struct thread *td, struct freebsd6_mmap_args *uap)
{

        return (kern_mmap(td, (uintptr_t)uap->addr, uap->len, uap->prot,
            uap->flags, uap->fd, uap->pos));
}
#endif

#ifdef COMPAT_43
#ifndef _SYS_SYSPROTO_H_
struct ommap_args {
        caddr_t addr;
        int len;
        int prot;
        int flags;
        int fd;
        long pos;
};
#endif
int
ommap(struct thread *td, struct ommap_args *uap)
{
        static const char cvtbsdprot[8] = {
                0,
                PROT_EXEC,
                PROT_WRITE,
                PROT_EXEC | PROT_WRITE,
                PROT_READ,
                PROT_EXEC | PROT_READ,
                PROT_WRITE | PROT_READ,
                PROT_EXEC | PROT_WRITE | PROT_READ,
        };
        int flags, prot;

#define OMAP_ANON       0x0002
#define OMAP_COPY       0x0020
#define OMAP_SHARED     0x0010
#define OMAP_FIXED      0x0100

        prot = cvtbsdprot[uap->prot & 0x7];
#ifdef COMPAT_FREEBSD32
#if defined(__amd64__)
        if (i386_read_exec && SV_PROC_FLAG(td->td_proc, SV_ILP32) &&
            prot != 0)
                prot |= PROT_EXEC;
#endif
#endif
        flags = 0;
        if (uap->flags & OMAP_ANON)
                flags |= MAP_ANON;
        if (uap->flags & OMAP_COPY)
                flags |= MAP_COPY;
        if (uap->flags & OMAP_SHARED)
                flags |= MAP_SHARED;
        else
                flags |= MAP_PRIVATE;
        if (uap->flags & OMAP_FIXED)
                flags |= MAP_FIXED;
        return (kern_mmap(td, (uintptr_t)uap->addr, uap->len, prot, flags,
            uap->fd, uap->pos));
}
#endif                          /* COMPAT_43 */


#ifndef _SYS_SYSPROTO_H_
struct msync_args {
        void *addr;
        size_t len;
        int flags;
};
#endif
int
sys_msync(struct thread *td, struct msync_args *uap)
{

        return (kern_msync(td, (uintptr_t)uap->addr, uap->len, uap->flags));
}

int
kern_msync(struct thread *td, uintptr_t addr0, size_t size, int flags)
{
        vm_offset_t addr;
        vm_size_t pageoff;
        vm_map_t map;
        int rv;

        addr = addr0;
        pageoff = (addr & PAGE_MASK);
        addr -= pageoff;
        size += pageoff;
        size = (vm_size_t) round_page(size);
        if (addr + size < addr)
                return (EINVAL);

        if ((flags & (MS_ASYNC|MS_INVALIDATE)) == (MS_ASYNC|MS_INVALIDATE))
                return (EINVAL);

        map = &td->td_proc->p_vmspace->vm_map;

        /*
         * Clean the pages and interpret the return value.
         */
        rv = vm_map_sync(map, addr, addr + size, (flags & MS_ASYNC) == 0,
            (flags & MS_INVALIDATE) != 0);
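        /*
         * Mapping note (assumption about vm_map_sync() behavior):
         * KERN_INVALID_ARGUMENT is the result of applying MS_INVALIDATE
         * to wired pages, reported below as EBUSY per msync(2).
         */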
        switch (rv) {
        case KERN_SUCCESS:
                return (0);
        case KERN_INVALID_ADDRESS:
                return (ENOMEM);
        case KERN_INVALID_ARGUMENT:
                return (EBUSY);
        case KERN_FAILURE:
                return (EIO);
        default:
                return (EINVAL);
        }
}

#ifndef _SYS_SYSPROTO_H_
struct munmap_args {
        void *addr;
        size_t len;
};
#endif
int
sys_munmap(struct thread *td, struct munmap_args *uap)
{

        return (kern_munmap(td, (uintptr_t)uap->addr, uap->len));
}

int
kern_munmap(struct thread *td, uintptr_t addr0, size_t size)
{
#ifdef HWPMC_HOOKS
        struct pmckern_map_out pkm;
        vm_map_entry_t entry;
        bool pmc_handled;
#endif
        vm_offset_t addr, end;
        vm_size_t pageoff;
        vm_map_t map;
        int rv;

        if (size == 0)
                return (EINVAL);

        addr = addr0;
        pageoff = (addr & PAGE_MASK);
        addr -= pageoff;
        size += pageoff;
        size = (vm_size_t) round_page(size);
        end = addr + size;
        map = &td->td_proc->p_vmspace->vm_map;
        if (!vm_map_range_valid(map, addr, end))
                return (EINVAL);

        vm_map_lock(map);
#ifdef HWPMC_HOOKS
        pmc_handled = false;
        if (PMC_HOOK_INSTALLED(PMC_FN_MUNMAP)) {
                pmc_handled = true;
                /*
                 * Inform hwpmc if the address range being unmapped contains
                 * an executable region.
                 */
                pkm.pm_address = (uintptr_t) NULL;
                if (vm_map_lookup_entry(map, addr, &entry)) {
                        for (; entry->start < end;
                            entry = entry->next) {
                                if (vm_map_check_protection(map, entry->start,
                                        entry->end, VM_PROT_EXECUTE) == TRUE) {
                                        pkm.pm_address = (uintptr_t) addr;
                                        pkm.pm_size = (size_t) size;
                                        break;
                                }
                        }
                }
        }
#endif
        rv = vm_map_delete(map, addr, end);

#ifdef HWPMC_HOOKS
        if (rv == KERN_SUCCESS && __predict_false(pmc_handled)) {
                /* downgrade the lock to prevent a LOR with the pmc-sx lock */
                vm_map_lock_downgrade(map);
                if (pkm.pm_address != (uintptr_t) NULL)
                        PMC_CALL_HOOK(td, PMC_FN_MUNMAP, (void *) &pkm);
                vm_map_unlock_read(map);
        } else
#endif
                vm_map_unlock(map);

        return (vm_mmap_to_errno(rv));
}

#ifndef _SYS_SYSPROTO_H_
struct mprotect_args {
        const void *addr;
        size_t len;
        int prot;
};
#endif
int
sys_mprotect(struct thread *td, struct mprotect_args *uap)
{

        return (kern_mprotect(td, (uintptr_t)uap->addr, uap->len, uap->prot));
}

int
kern_mprotect(struct thread *td, uintptr_t addr0, size_t size, int prot)
{
        vm_offset_t addr;
        vm_size_t pageoff;

        addr = addr0;
        prot = (prot & VM_PROT_ALL);
        pageoff = (addr & PAGE_MASK);
        addr -= pageoff;
        size += pageoff;
        size = (vm_size_t) round_page(size);
#ifdef COMPAT_FREEBSD32
        if (SV_PROC_FLAG(td->td_proc, SV_ILP32)) {
                if (((addr + size) & 0xffffffff) < addr)
                        return (EINVAL);
        } else
#endif
        if (addr + size < addr)
                return (EINVAL);

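        /*
         * The FALSE argument selects "set protection" rather than "set
         * max protection", so each entry's maximum protection is left
         * untouched (assumption based on the vm_map_protect() set_max
         * parameter).
         */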
        switch (vm_map_protect(&td->td_proc->p_vmspace->vm_map, addr,
            addr + size, prot, FALSE)) {
        case KERN_SUCCESS:
                return (0);
        case KERN_PROTECTION_FAILURE:
                return (EACCES);
        case KERN_RESOURCE_SHORTAGE:
                return (ENOMEM);
        }
        return (EINVAL);
}

#ifndef _SYS_SYSPROTO_H_
struct minherit_args {
        void *addr;
        size_t len;
        int inherit;
};
#endif
int
sys_minherit(struct thread *td, struct minherit_args *uap)
{

        return (kern_minherit(td, (uintptr_t)uap->addr, uap->len,
            uap->inherit));
}

int
kern_minherit(struct thread *td, uintptr_t addr0, size_t len, int inherit0)
{
        vm_offset_t addr;
        vm_size_t size, pageoff;
        vm_inherit_t inherit;

        addr = (vm_offset_t)addr0;
        size = len;
        inherit = inherit0;

        pageoff = (addr & PAGE_MASK);
        addr -= pageoff;
        size += pageoff;
        size = (vm_size_t) round_page(size);
        if (addr + size < addr)
                return (EINVAL);

        switch (vm_map_inherit(&td->td_proc->p_vmspace->vm_map, addr,
            addr + size, inherit)) {
        case KERN_SUCCESS:
                return (0);
        case KERN_PROTECTION_FAILURE:
                return (EACCES);
        }
        return (EINVAL);
}

#ifndef _SYS_SYSPROTO_H_
struct madvise_args {
        void *addr;
        size_t len;
        int behav;
};
#endif

int
sys_madvise(struct thread *td, struct madvise_args *uap)
{

        return (kern_madvise(td, (uintptr_t)uap->addr, uap->len, uap->behav));
}

int
kern_madvise(struct thread *td, uintptr_t addr0, size_t len, int behav)
{
        vm_map_t map;
        vm_offset_t addr, end, start;
        int flags;

        /*
         * Check for our special case, advising the swap pager we are
         * "immortal."
         */
        if (behav == MADV_PROTECT) {
                flags = PPROT_SET;
                return (kern_procctl(td, P_PID, td->td_proc->p_pid,
                    PROC_SPROTECT, &flags));
        }

        /*
         * Check for illegal addresses.  Watch out for address wrap... Note
         * that VM_*_ADDRESS are not constants due to casts (argh).
         */
        map = &td->td_proc->p_vmspace->vm_map;
        addr = addr0;
        if (!vm_map_range_valid(map, addr, addr + len))
                return (EINVAL);

        /*
         * Since this routine is only advisory, we default to conservative
         * behavior.
         */
        start = trunc_page(addr);
        end = round_page(addr + len);

        /*
         * vm_map_madvise() checks for illegal values of behav.
         */
        return (vm_map_madvise(map, start, end, behav));
}

#ifndef _SYS_SYSPROTO_H_
struct mincore_args {
        const void *addr;
        size_t len;
        char *vec;
};
#endif

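/*
 * Illustrative userland usage (not from the original file): each byte of
 * vec describes one page of the range, so after a successful
 *
 *      mincore(addr, len, vec)
 *
 * call, (vec[0] & MINCORE_INCORE) tests the first page.  With the
 * vm.mincore_mapped sysctl set (the default above), the result reports
 * this process's mappings rather than residency.
 */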
int
sys_mincore(struct thread *td, struct mincore_args *uap)
{

        return (kern_mincore(td, (uintptr_t)uap->addr, uap->len, uap->vec));
}

int
kern_mincore(struct thread *td, uintptr_t addr0, size_t len, char *vec)
{
        vm_offset_t addr, first_addr;
        vm_offset_t end, cend;
        pmap_t pmap;
        vm_map_t map;
        int error = 0;
        int vecindex, lastvecindex;
        vm_map_entry_t current;
        vm_map_entry_t entry;
        vm_object_t object;
        vm_paddr_t locked_pa;
        vm_page_t m;
        vm_pindex_t pindex;
        int mincoreinfo;
        unsigned int timestamp;
        boolean_t locked;

        /*
         * Make sure that the addresses presented are valid for user
         * mode.
         */
        first_addr = addr = trunc_page(addr0);
        end = addr + (vm_size_t)round_page(len);
        map = &td->td_proc->p_vmspace->vm_map;
        if (end > vm_map_max(map) || end < addr)
                return (ENOMEM);

        pmap = vmspace_pmap(td->td_proc->p_vmspace);

        vm_map_lock_read(map);
RestartScan:
        timestamp = map->timestamp;

        if (!vm_map_lookup_entry(map, addr, &entry)) {
                vm_map_unlock_read(map);
                return (ENOMEM);
        }

        /*
         * Do this on a map entry basis so that if the pages are not
         * in the current process's address space, we can easily look
         * up the pages elsewhere.
         */
        lastvecindex = -1;
        for (current = entry; current->start < end; current = current->next) {

                /*
                 * check for contiguity
                 */
                if (current->end < end && current->next->start > current->end) {
                        vm_map_unlock_read(map);
                        return (ENOMEM);
                }

                /*
                 * ignore submaps (for now) or null objects
                 */
                if ((current->eflags & MAP_ENTRY_IS_SUB_MAP) ||
                        current->object.vm_object == NULL)
                        continue;

                /*
                 * limit this scan to the current map entry and the
                 * limits for the mincore call
                 */
                if (addr < current->start)
                        addr = current->start;
                cend = current->end;
                if (cend > end)
                        cend = end;

                /*
                 * scan this entry one page at a time
                 */
                while (addr < cend) {
                        /*
                         * Check pmap first, it is likely faster, also
                         * it can provide info as to whether we are the
                         * one referencing or modifying the page.
                         */
                        object = NULL;
                        locked_pa = 0;
                retry:
                        m = NULL;
                        mincoreinfo = pmap_mincore(pmap, addr, &locked_pa);
                        if (mincore_mapped) {
                                /*
                                 * We only care about this pmap's
                                 * mapping of the page, if any.
                                 */
                                if (locked_pa != 0) {
                                        vm_page_unlock(PHYS_TO_VM_PAGE(
                                            locked_pa));
                                }
                        } else if (locked_pa != 0) {
                                /*
                                 * The page is mapped by this process but not
                                 * both accessed and modified.  It is also
                                 * managed.  Acquire the object lock so that
                                 * other mappings might be examined.
                                 */
                                m = PHYS_TO_VM_PAGE(locked_pa);
                                if (m->object != object) {
                                        if (object != NULL)
                                                VM_OBJECT_WUNLOCK(object);
                                        object = m->object;
                                        locked = VM_OBJECT_TRYWLOCK(object);
                                        vm_page_unlock(m);
                                        if (!locked) {
                                                VM_OBJECT_WLOCK(object);
                                                vm_page_lock(m);
                                                goto retry;
                                        }
                                } else
                                        vm_page_unlock(m);
                                KASSERT(m->valid == VM_PAGE_BITS_ALL,
                                    ("mincore: page %p is mapped but invalid",
                                    m));
                        } else if (mincoreinfo == 0) {
                                /*
                                 * The page is not mapped by this process.  If
                                 * the object implements managed pages, then
                                 * determine if the page is resident so that
                                 * the mappings might be examined.
                                 */
                                if (current->object.vm_object != object) {
                                        if (object != NULL)
                                                VM_OBJECT_WUNLOCK(object);
                                        object = current->object.vm_object;
                                        VM_OBJECT_WLOCK(object);
                                }
                                if (object->type == OBJT_DEFAULT ||
                                    object->type == OBJT_SWAP ||
                                    object->type == OBJT_VNODE) {
                                        pindex = OFF_TO_IDX(current->offset +
                                            (addr - current->start));
                                        m = vm_page_lookup(object, pindex);
                                        if (m != NULL && m->valid == 0)
                                                m = NULL;
                                        if (m != NULL)
                                                mincoreinfo = MINCORE_INCORE;
                                }
                        }
                        if (m != NULL) {
                                /* Examine other mappings to the page. */
                                if (m->dirty == 0 && pmap_is_modified(m))
                                        vm_page_dirty(m);
                                if (m->dirty != 0)
                                        mincoreinfo |= MINCORE_MODIFIED_OTHER;
                                /*
                                 * The first test for PGA_REFERENCED is an
                                 * optimization.  The second test is
                                 * required because a concurrent pmap
                                 * operation could clear the last reference
                                 * and set PGA_REFERENCED before the call to
                                 * pmap_is_referenced().
                                 */
                                if ((m->aflags & PGA_REFERENCED) != 0 ||
                                    pmap_is_referenced(m) ||
                                    (m->aflags & PGA_REFERENCED) != 0)
                                        mincoreinfo |= MINCORE_REFERENCED_OTHER;
                        }
                        if (object != NULL)
                                VM_OBJECT_WUNLOCK(object);

                        /*
                         * subyte may page fault.  In case it needs to modify
                         * the map, we release the lock.
                         */
                        vm_map_unlock_read(map);

                        /*
                         * calculate index into user supplied byte vector
                         */
                        vecindex = atop(addr - first_addr);

                        /*
                         * If we have skipped map entries, we need to make sure that
                         * the byte vector is zeroed for those skipped entries.
                         */
                        while ((lastvecindex + 1) < vecindex) {
                                ++lastvecindex;
                                error = subyte(vec + lastvecindex, 0);
                                if (error) {
                                        error = EFAULT;
                                        goto done2;
                                }
                        }

                        /*
                         * Pass the page information to the user
                         */
                        error = subyte(vec + vecindex, mincoreinfo);
                        if (error) {
                                error = EFAULT;
                                goto done2;
                        }

                        /*
                         * If the map has changed, due to the subyte, the previous
                         * output may be invalid.
                         */
                        vm_map_lock_read(map);
                        if (timestamp != map->timestamp)
                                goto RestartScan;

                        lastvecindex = vecindex;
                        addr += PAGE_SIZE;
                }
        }

        /*
         * subyte may page fault.  In case it needs to modify
         * the map, we release the lock.
         */
        vm_map_unlock_read(map);

        /*
         * Zero the last entries in the byte vector.
         */
        vecindex = atop(end - first_addr);
        while ((lastvecindex + 1) < vecindex) {
                ++lastvecindex;
                error = subyte(vec + lastvecindex, 0);
                if (error) {
                        error = EFAULT;
                        goto done2;
                }
        }

        /*
         * If the map has changed, due to the subyte, the previous
         * output may be invalid.
         */
        vm_map_lock_read(map);
        if (timestamp != map->timestamp)
                goto RestartScan;
        vm_map_unlock_read(map);
done2:
        return (error);
}

#ifndef _SYS_SYSPROTO_H_
struct mlock_args {
        const void *addr;
        size_t len;
};
#endif
int
sys_mlock(struct thread *td, struct mlock_args *uap)
{

        return (kern_mlock(td->td_proc, td->td_ucred,
            __DECONST(uintptr_t, uap->addr), uap->len));
}

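/*
 * Wire down the given range: the request must fit under both the global
 * vm_page_max_user_wired cap and the per-process RLIMIT_MEMLOCK limit
 * (and the RACCT_MEMLOCK accounting, when racct is enabled) before
 * vm_map_wire() is attempted.
 */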
int
kern_mlock(struct proc *proc, struct ucred *cred, uintptr_t addr0, size_t len)
{
        vm_offset_t addr, end, last, start;
        vm_size_t npages, size;
        vm_map_t map;
        unsigned long nsize;
        int error;

        error = priv_check_cred(cred, PRIV_VM_MLOCK, 0);
        if (error)
                return (error);
        addr = addr0;
        size = len;
        last = addr + size;
        start = trunc_page(addr);
        end = round_page(last);
        if (last < addr || end < addr)
                return (EINVAL);
        npages = atop(end - start);
        if ((u_int)npages > vm_page_max_user_wired)
                return (ENOMEM);
        map = &proc->p_vmspace->vm_map;
        PROC_LOCK(proc);
        nsize = ptoa(npages + pmap_wired_count(map->pmap));
        if (nsize > lim_cur_proc(proc, RLIMIT_MEMLOCK)) {
                PROC_UNLOCK(proc);
                return (ENOMEM);
        }
        PROC_UNLOCK(proc);
#ifdef RACCT
        if (racct_enable) {
                PROC_LOCK(proc);
                error = racct_set(proc, RACCT_MEMLOCK, nsize);
                PROC_UNLOCK(proc);
                if (error != 0)
                        return (ENOMEM);
        }
#endif
        error = vm_map_wire(map, start, end,
            VM_MAP_WIRE_USER | VM_MAP_WIRE_NOHOLES);
#ifdef RACCT
        if (racct_enable && error != KERN_SUCCESS) {
                PROC_LOCK(proc);
                racct_set(proc, RACCT_MEMLOCK,
                    ptoa(pmap_wired_count(map->pmap)));
                PROC_UNLOCK(proc);
        }
#endif
        return (error == KERN_SUCCESS ? 0 : ENOMEM);
}

#ifndef _SYS_SYSPROTO_H_
struct mlockall_args {
        int     how;
};
#endif

int
sys_mlockall(struct thread *td, struct mlockall_args *uap)
{
        vm_map_t map;
        int error;

        map = &td->td_proc->p_vmspace->vm_map;
        error = priv_check(td, PRIV_VM_MLOCK);
        if (error)
                return (error);

        if ((uap->how == 0) || ((uap->how & ~(MCL_CURRENT|MCL_FUTURE)) != 0))
                return (EINVAL);

        /*
         * If wiring all pages in the process would cause it to exceed
         * a hard resource limit, return ENOMEM.
         */
        if (!old_mlock && uap->how & MCL_CURRENT) {
                if (map->size > lim_cur(td, RLIMIT_MEMLOCK))
                        return (ENOMEM);
        }
#ifdef RACCT
        if (racct_enable) {
                PROC_LOCK(td->td_proc);
                error = racct_set(td->td_proc, RACCT_MEMLOCK, map->size);
                PROC_UNLOCK(td->td_proc);
                if (error != 0)
                        return (ENOMEM);
        }
#endif

        if (uap->how & MCL_FUTURE) {
                vm_map_lock(map);
                vm_map_modflags(map, MAP_WIREFUTURE, 0);
                vm_map_unlock(map);
                error = 0;
        }

        if (uap->how & MCL_CURRENT) {
                /*
                 * P1003.1-2001 mandates that all currently mapped pages
                 * will be memory resident and locked (wired) upon return
                 * from mlockall(). vm_map_wire() will wire pages, by
                 * calling vm_fault_wire() for each page in the region.
                 */
                error = vm_map_wire(map, vm_map_min(map), vm_map_max(map),
                    VM_MAP_WIRE_USER|VM_MAP_WIRE_HOLESOK);
                if (error == KERN_SUCCESS)
                        error = 0;
                else if (error == KERN_RESOURCE_SHORTAGE)
                        error = ENOMEM;
                else
                        error = EAGAIN;
        }
#ifdef RACCT
        if (racct_enable && error != KERN_SUCCESS) {
                PROC_LOCK(td->td_proc);
                racct_set(td->td_proc, RACCT_MEMLOCK,
                    ptoa(pmap_wired_count(map->pmap)));
                PROC_UNLOCK(td->td_proc);
        }
#endif

        return (error);
}

#ifndef _SYS_SYSPROTO_H_
struct munlockall_args {
        register_t dummy;
};
#endif

int
sys_munlockall(struct thread *td, struct munlockall_args *uap)
{
        vm_map_t map;
        int error;

        map = &td->td_proc->p_vmspace->vm_map;
        error = priv_check(td, PRIV_VM_MUNLOCK);
        if (error)
                return (error);

        /* Clear the MAP_WIREFUTURE flag from this vm_map. */
        vm_map_lock(map);
        vm_map_modflags(map, 0, MAP_WIREFUTURE);
        vm_map_unlock(map);

        /* Forcibly unwire all pages. */
        error = vm_map_unwire(map, vm_map_min(map), vm_map_max(map),
            VM_MAP_WIRE_USER|VM_MAP_WIRE_HOLESOK);
#ifdef RACCT
        if (racct_enable && error == KERN_SUCCESS) {
                PROC_LOCK(td->td_proc);
                racct_set(td->td_proc, RACCT_MEMLOCK, 0);
                PROC_UNLOCK(td->td_proc);
        }
#endif

        return (error);
}

#ifndef _SYS_SYSPROTO_H_
struct munlock_args {
        const void *addr;
        size_t len;
};
#endif
int
sys_munlock(struct thread *td, struct munlock_args *uap)
{

        return (kern_munlock(td, (uintptr_t)uap->addr, uap->len));
}

int
kern_munlock(struct thread *td, uintptr_t addr0, size_t size)
{
        vm_offset_t addr, end, last, start;
#ifdef RACCT
        vm_map_t map;
#endif
        int error;

        error = priv_check(td, PRIV_VM_MUNLOCK);
        if (error)
                return (error);
        addr = addr0;
        last = addr + size;
        start = trunc_page(addr);
        end = round_page(last);
        if (last < addr || end < addr)
                return (EINVAL);
        error = vm_map_unwire(&td->td_proc->p_vmspace->vm_map, start, end,
            VM_MAP_WIRE_USER | VM_MAP_WIRE_NOHOLES);
#ifdef RACCT
        if (racct_enable && error == KERN_SUCCESS) {
                PROC_LOCK(td->td_proc);
                map = &td->td_proc->p_vmspace->vm_map;
                racct_set(td->td_proc, RACCT_MEMLOCK,
                    ptoa(pmap_wired_count(map->pmap)));
                PROC_UNLOCK(td->td_proc);
        }
#endif
        return (error == KERN_SUCCESS ? 0 : ENOMEM);
}

/*
 * vm_mmap_vnode()
 *
 * Helper function for vm_mmap.  Performs the sanity checks specific to
 * mmap operations on vnodes.
 */
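/*
 * For a shared, writable mapping of a regular file, *writecounted is set
 * to TRUE once vm_pager_update_writecount() has charged the mapping to
 * the vnode, which tells the caller to release that charge if the map
 * operation fails later on.
 */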
int
vm_mmap_vnode(struct thread *td, vm_size_t objsize,
    vm_prot_t prot, vm_prot_t *maxprotp, int *flagsp,
    struct vnode *vp, vm_ooffset_t *foffp, vm_object_t *objp,
    boolean_t *writecounted)
{
        struct vattr va;
        vm_object_t obj;
        vm_ooffset_t foff;
        struct ucred *cred;
        int error, flags;
        bool writex;

        cred = td->td_ucred;
        writex = (*maxprotp & VM_PROT_WRITE) != 0 &&
            (*flagsp & MAP_SHARED) != 0;
        if ((error = vget(vp, LK_SHARED, td)) != 0)
                return (error);
        AUDIT_ARG_VNODE1(vp);
        foff = *foffp;
        flags = *flagsp;
        obj = vp->v_object;
        if (vp->v_type == VREG) {
                /*
                 * Get the proper underlying object
                 */
                if (obj == NULL) {
                        error = EINVAL;
                        goto done;
                }
                if (obj->type == OBJT_VNODE && obj->handle != vp) {
                        vput(vp);
                        vp = (struct vnode *)obj->handle;
                        /*
                         * Bypass filesystems obey the mpsafety of the
                         * underlying fs.  Tmpfs never bypasses.
                         */
                        error = vget(vp, LK_SHARED, td);
                        if (error != 0)
                                return (error);
                }
                if (writex) {
                        *writecounted = TRUE;
                        vm_pager_update_writecount(obj, 0, objsize);
                }
        } else {
                error = EINVAL;
                goto done;
        }
        if ((error = VOP_GETATTR(vp, &va, cred)))
                goto done;
#ifdef MAC
        /* This relies on VM_PROT_* matching PROT_*. */
        error = mac_vnode_check_mmap(cred, vp, (int)prot, flags);
        if (error != 0)
                goto done;
#endif
        if ((flags & MAP_SHARED) != 0) {
                if ((va.va_flags & (SF_SNAPSHOT|IMMUTABLE|APPEND)) != 0) {
                        if (prot & VM_PROT_WRITE) {
                                error = EPERM;
                                goto done;
                        }
                        *maxprotp &= ~VM_PROT_WRITE;
                }
        }
        /*
         * If it is a regular file without any references,
         * we do not need to sync it.
         * Adjust the object size to be the size of the actual file.
         */
        objsize = round_page(va.va_size);
        if (va.va_nlink == 0)
                flags |= MAP_NOSYNC;
        if (obj->type == OBJT_VNODE) {
                obj = vm_pager_allocate(OBJT_VNODE, vp, objsize, prot, foff,
                    cred);
                if (obj == NULL) {
                        error = ENOMEM;
                        goto done;
                }
        } else {
                KASSERT(obj->type == OBJT_DEFAULT || obj->type == OBJT_SWAP,
                    ("wrong object type"));
                VM_OBJECT_WLOCK(obj);
                vm_object_reference_locked(obj);
#if VM_NRESERVLEVEL > 0
                vm_object_color(obj, 0);
#endif
                VM_OBJECT_WUNLOCK(obj);
        }
        *objp = obj;
        *flagsp = flags;

        vfs_mark_atime(vp, cred);

done:
        if (error != 0 && *writecounted) {
                *writecounted = FALSE;
                vm_pager_update_writecount(obj, objsize, 0);
        }
        vput(vp);
        return (error);
}

/*
 * vm_mmap_cdev()
 *
 * Helper function for vm_mmap.  Performs the sanity checks specific to
 * mmap operations on cdevs.
 */
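/*
 * Devices whose cdevsw sets D_MMAP_ANON (the zero device, for instance)
 * are converted to ordinary anonymous mappings here; every other device
 * mapping is forced to be shared, since cdevs provide no private copies
 * of their pages.
 */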
int
vm_mmap_cdev(struct thread *td, vm_size_t objsize, vm_prot_t prot,
    vm_prot_t *maxprotp, int *flagsp, struct cdev *cdev, struct cdevsw *dsw,
    vm_ooffset_t *foff, vm_object_t *objp)
{
        vm_object_t obj;
        int error, flags;

        flags = *flagsp;

        if (dsw->d_flags & D_MMAP_ANON) {
                *objp = NULL;
                *foff = 0;
                *maxprotp = VM_PROT_ALL;
                *flagsp |= MAP_ANON;
                return (0);
        }
        /*
         * cdevs do not provide private mappings of any kind.
         */
        if ((*maxprotp & VM_PROT_WRITE) == 0 &&
            (prot & VM_PROT_WRITE) != 0)
                return (EACCES);
        if (flags & (MAP_PRIVATE|MAP_COPY))
                return (EINVAL);
        /*
         * Force device mappings to be shared.
         */
        flags |= MAP_SHARED;
#ifdef MAC_XXX
        error = mac_cdev_check_mmap(td->td_ucred, cdev, (int)prot);
        if (error != 0)
                return (error);
#endif
        /*
         * First, try d_mmap_single().  If that is not implemented
         * (returns ENODEV), fall back to using the device pager.
         * Note that d_mmap_single() must return a reference to the
         * object (it needs to bump the reference count of the object
         * it returns somehow).
         *
         * XXX assumes VM_PROT_* == PROT_*
         */
        error = dsw->d_mmap_single(cdev, foff, objsize, objp, (int)prot);
        if (error != ENODEV)
                return (error);
        obj = vm_pager_allocate(OBJT_DEVICE, cdev, objsize, prot, *foff,
            td->td_ucred);
        if (obj == NULL)
                return (EINVAL);
        *objp = obj;
        *flagsp = flags;
        return (0);
}

/*
 * vm_mmap()
 *
 * Internal version of mmap used by exec, sys5 shared memory, and
 * various device drivers.  Handle is either a vnode pointer, a
 * character device, or NULL for MAP_ANON.
 */
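/*
 * Illustrative call (hypothetical driver code matching the signature
 * below, not from the original file):
 *
 *      error = vm_mmap(&p->p_vmspace->vm_map, &addr, size,
 *          VM_PROT_READ | VM_PROT_WRITE, VM_PROT_ALL, MAP_SHARED,
 *          OBJT_DEVICE, cdev, 0);
 */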
 1393 int
 1394 vm_mmap(vm_map_t map, vm_offset_t *addr, vm_size_t size, vm_prot_t prot,
 1395         vm_prot_t maxprot, int flags,
 1396         objtype_t handle_type, void *handle,
 1397         vm_ooffset_t foff)
 1398 {
 1399         vm_object_t object;
 1400         struct thread *td = curthread;
 1401         int error;
 1402         boolean_t writecounted;
 1403 
 1404         if (size == 0)
 1405                 return (EINVAL);
 1406 
 1407         size = round_page(size);
 1408         object = NULL;
 1409         writecounted = FALSE;
 1410 
 1411         /*
 1412          * Lookup/allocate object.
 1413          */
 1414         switch (handle_type) {
 1415         case OBJT_DEVICE: {
 1416                 struct cdevsw *dsw;
 1417                 struct cdev *cdev;
 1418                 int ref;
 1419 
 1420                 cdev = handle;
 1421                 dsw = dev_refthread(cdev, &ref);
 1422                 if (dsw == NULL)
 1423                         return (ENXIO);
 1424                 error = vm_mmap_cdev(td, size, prot, &maxprot, &flags, cdev,
 1425                     dsw, &foff, &object);
 1426                 dev_relthread(cdev, ref);
 1427                 break;
 1428         }
 1429         case OBJT_VNODE:
 1430                 error = vm_mmap_vnode(td, size, prot, &maxprot, &flags,
 1431                     handle, &foff, &object, &writecounted);
 1432                 break;
 1433         case OBJT_DEFAULT:
 1434                 if (handle == NULL) {
 1435                         error = 0;
 1436                         break;
 1437                 }
 1438                 /* FALLTHROUGH */
 1439         default:
 1440                 error = EINVAL;
 1441                 break;
 1442         }
 1443         if (error)
 1444                 return (error);
 1445 
 1446         error = vm_mmap_object(map, addr, size, prot, maxprot, flags, object,
 1447             foff, writecounted, td);
 1448         if (error != 0 && object != NULL) {
 1449                 /*
 1450                  * If this mapping was accounted for in the vnode's
 1451                  * writecount, then undo that now.
 1452                  */
 1453                 if (writecounted)
 1454                         vm_pager_release_writecount(object, 0, size);
 1455                 vm_object_deallocate(object);
 1456         }
 1457         return (error);
 1458 }
 1459 
 1460 int
 1461 kern_mmap_racct_check(struct thread *td, vm_map_t map, vm_size_t size)
 1462 {
 1463         int error;
 1464 
 1465         RACCT_PROC_LOCK(td->td_proc);
 1466         if (map->size + size > lim_cur(td, RLIMIT_VMEM)) {
 1467                 RACCT_PROC_UNLOCK(td->td_proc);
 1468                 return (ENOMEM);
 1469         }
 1470         if (racct_set(td->td_proc, RACCT_VMEM, map->size + size)) {
 1471                 RACCT_PROC_UNLOCK(td->td_proc);
 1472                 return (ENOMEM);
 1473         }
 1474         if (!old_mlock && (map->flags & MAP_WIREFUTURE) != 0) {
 1475                 if (ptoa(pmap_wired_count(map->pmap)) + size >
 1476                     lim_cur(td, RLIMIT_MEMLOCK)) {
 1477                         racct_set_force(td->td_proc, RACCT_VMEM, map->size);
 1478                         RACCT_PROC_UNLOCK(td->td_proc);
 1479                         return (ENOMEM);
 1480                 }
 1481                 error = racct_set(td->td_proc, RACCT_MEMLOCK,
 1482                     ptoa(pmap_wired_count(map->pmap)) + size);
 1483                 if (error != 0) {
 1484                         racct_set_force(td->td_proc, RACCT_VMEM, map->size);
 1485                         RACCT_PROC_UNLOCK(td->td_proc);
 1486                         return (error);
 1487                 }
 1488         }
 1489         RACCT_PROC_UNLOCK(td->td_proc);
 1490         return (0);
 1491 }
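
/*
 * Editorial sketch, not part of the original file: a small userland
 * program (shown as a comment) demonstrating the RLIMIT_VMEM check
 * enforced above.  After lowering RLIMIT_VMEM, a large anonymous
 * mmap() is expected to fail with ENOMEM.  Sizes are illustrative.
 *
 *	#include <sys/resource.h>
 *	#include <sys/mman.h>
 *	#include <errno.h>
 *	#include <stdio.h>
 *
 *	int
 *	main(void)
 *	{
 *		struct rlimit rl = { 8 * 1024 * 1024, 8 * 1024 * 1024 };
 *		void *p;
 *
 *		if (setrlimit(RLIMIT_VMEM, &rl) != 0)
 *			perror("setrlimit");
 *		p = mmap(NULL, (size_t)64 * 1024 * 1024,
 *		    PROT_READ | PROT_WRITE, MAP_ANON | MAP_PRIVATE, -1, 0);
 *		if (p == MAP_FAILED && errno == ENOMEM)
 *			printf("mmap: ENOMEM, as expected\n");
 *		return (0);
 *	}
 */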
 1492 
 1493 /*
 1494  * Internal version of mmap that maps a specific VM object into a
 1495  * map.  Called by mmap for MAP_ANON, vm_mmap, shm_mmap, and vn_mmap.
 1496  */
 1497 int
 1498 vm_mmap_object(vm_map_t map, vm_offset_t *addr, vm_size_t size, vm_prot_t prot,
 1499     vm_prot_t maxprot, int flags, vm_object_t object, vm_ooffset_t foff,
 1500     boolean_t writecounted, struct thread *td)
 1501 {
 1502         vm_offset_t max_addr;
 1503         int docow, error, findspace, rv;
 1504         bool curmap, fitit;
 1505 
 1506         curmap = map == &td->td_proc->p_vmspace->vm_map;
 1507         if (curmap) {
 1508                 error = kern_mmap_racct_check(td, map, size);
 1509                 if (error != 0)
 1510                         return (error);
 1511         }
 1512 
 1513         /*
 1514          * We currently can only deal with page aligned file offsets.
 1515          * The mmap() system call already enforces this by subtracting
 1516          * the page offset from the file offset, but checking here
 1517          * catches errors in device drivers (e.g. d_mmap_single()
 1518          * callbacks) and other internal mapping requests (such as in
 1519          * exec).
 1520          */
 1521         if (foff & PAGE_MASK)
 1522                 return (EINVAL);
 1523 
 1524         if ((flags & MAP_FIXED) == 0) {
 1525                 fitit = TRUE;
 1526                 *addr = round_page(*addr);
 1527         } else {
 1528                 if (*addr != trunc_page(*addr))
 1529                         return (EINVAL);
 1530                 fitit = FALSE;
 1531         }
 1532 
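	/*
	 * Translate the caller-visible mmap() MAP_* flags into the
	 * vm_map "docow" control flags consumed by vm_map_find() and
	 * vm_map_fixed() below.
	 */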
 1533         if (flags & MAP_ANON) {
 1534                 if (object != NULL || foff != 0)
 1535                         return (EINVAL);
 1536                 docow = 0;
 1537         } else if (flags & MAP_PREFAULT_READ)
 1538                 docow = MAP_PREFAULT;
 1539         else
 1540                 docow = MAP_PREFAULT_PARTIAL;
 1541 
 1542         if ((flags & (MAP_ANON|MAP_SHARED)) == 0)
 1543                 docow |= MAP_COPY_ON_WRITE;
 1544         if (flags & MAP_NOSYNC)
 1545                 docow |= MAP_DISABLE_SYNCER;
 1546         if (flags & MAP_NOCORE)
 1547                 docow |= MAP_DISABLE_COREDUMP;
 1548         /* Shared memory is also shared with children. */
 1549         if (flags & MAP_SHARED)
 1550                 docow |= MAP_INHERIT_SHARE;
 1551         if (writecounted)
 1552                 docow |= MAP_WRITECOUNT;
 1553         if (flags & MAP_STACK) {
 1554                 if (object != NULL)
 1555                         return (EINVAL);
 1556                 docow |= MAP_STACK_GROWS_DOWN;
 1557         }
 1558         if ((flags & MAP_EXCL) != 0)
 1559                 docow |= MAP_CHECK_EXCL;
 1560         if ((flags & MAP_GUARD) != 0)
 1561                 docow |= MAP_CREATE_GUARD;
 1562 
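	/*
	 * Non-fixed mappings pick a free-space search policy (findspace)
	 * from any requested alignment and let the VM map choose the
	 * address; fixed mappings go directly to vm_map_fixed().
	 */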
 1563         if (fitit) {
 1564                 if ((flags & MAP_ALIGNMENT_MASK) == MAP_ALIGNED_SUPER)
 1565                         findspace = VMFS_SUPER_SPACE;
 1566                 else if ((flags & MAP_ALIGNMENT_MASK) != 0)
 1567                         findspace = VMFS_ALIGNED_SPACE(flags >>
 1568                             MAP_ALIGNMENT_SHIFT);
 1569                 else
 1570                         findspace = VMFS_OPTIMAL_SPACE;
 1571                 max_addr = 0;
 1572 #ifdef MAP_32BIT
 1573                 if ((flags & MAP_32BIT) != 0)
 1574                         max_addr = MAP_32BIT_MAX_ADDR;
 1575 #endif
 1576                 if (curmap) {
 1577                         rv = vm_map_find_min(map, object, foff, addr, size,
 1578                             round_page((vm_offset_t)td->td_proc->p_vmspace->
 1579                             vm_daddr + lim_max(td, RLIMIT_DATA)), max_addr,
 1580                             findspace, prot, maxprot, docow);
 1581                 } else {
 1582                         rv = vm_map_find(map, object, foff, addr, size,
 1583                             max_addr, findspace, prot, maxprot, docow);
 1584                 }
 1585         } else {
 1586                 rv = vm_map_fixed(map, object, foff, *addr, size,
 1587                     prot, maxprot, docow);
 1588         }
 1589 
 1590         if (rv == KERN_SUCCESS) {
 1591                 /*
 1592                  * If the process has requested that all future mappings be
 1593                  * wired, heed this; recheck under the map lock to avoid races.
 1594                  */
 1595                 if ((map->flags & MAP_WIREFUTURE) != 0) {
 1596                         vm_map_lock(map);
 1597                         if ((map->flags & MAP_WIREFUTURE) != 0)
 1598                                 (void)vm_map_wire_locked(map, *addr,
 1599                                     *addr + size, VM_MAP_WIRE_USER |
 1600                                     ((flags & MAP_STACK) ? VM_MAP_WIRE_HOLESOK :
 1601                                     VM_MAP_WIRE_NOHOLES));
 1602                         vm_map_unlock(map);
 1603                 }
 1604         }
 1605         return (vm_mmap_to_errno(rv));
 1606 }
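
/*
 * Editorial sketch, not part of the original file: creating an
 * anonymous mapping directly through vm_mmap_object() above.  With
 * MAP_ANON the object must be NULL and foff zero, as enforced at the
 * top of the function.  The helper name is an illustrative assumption.
 */
#if 0	/* illustrative only; not compiled */
static int
example_map_anon(vm_map_t map, vm_offset_t *addr, vm_size_t len,
    struct thread *td)
{

	/* MAP_SHARED additionally marks the entry MAP_INHERIT_SHARE. */
	return (vm_mmap_object(map, addr, len,
	    VM_PROT_READ | VM_PROT_WRITE, VM_PROT_ALL,
	    MAP_ANON | MAP_SHARED, NULL, 0, FALSE, td));
}
#endif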
 1607 
 1608 /*
 1609  * Translate a Mach VM return code to zero on success or the appropriate errno
 1610  * on failure.
 1611  */
 1612 int
 1613 vm_mmap_to_errno(int rv)
 1614 {
 1615 
 1616         switch (rv) {
 1617         case KERN_SUCCESS:
 1618                 return (0);
 1619         case KERN_INVALID_ADDRESS:
 1620         case KERN_NO_SPACE:
 1621                 return (ENOMEM);
 1622         case KERN_PROTECTION_FAILURE:
 1623                 return (EACCES);
 1624         default:
 1625                 return (EINVAL);
 1626         }
 1627 }
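
/*
 * Editorial sketch, not part of the original file: the typical consumer
 * pattern for vm_mmap_to_errno(), converting the Mach-style KERN_*
 * status of a vm_map operation into an errno for userland.  The
 * wrapper name is an illustrative assumption.
 */
#if 0	/* illustrative only; not compiled */
static int
example_reserve_space(vm_map_t map, vm_offset_t *addr, vm_size_t len)
{
	int rv;

	rv = vm_map_find(map, NULL, 0, addr, len, 0, VMFS_OPTIMAL_SPACE,
	    VM_PROT_RW, VM_PROT_RW, 0);
	return (vm_mmap_to_errno(rv));
}
#endif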
