FreeBSD/Linux Kernel Cross Reference
sys/mm/mlock.c


/*
 *      linux/mm/mlock.c
 *
 *  (C) Copyright 1995 Linus Torvalds
 */
#include <linux/slab.h>
#include <linux/shm.h>
#include <linux/mman.h>
#include <linux/smp_lock.h>
#include <linux/pagemap.h>

#include <asm/uaccess.h>
#include <asm/pgtable.h>

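/*
 * Change the flags on an entire VMA.  No split is needed, so only
 * vm_flags is rewritten, under the mm's page_table_lock so the update
 * is not seen half-done by other users of the VMA list.
 */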
static inline int mlock_fixup_all(struct vm_area_struct * vma, int newflags)
{
        spin_lock(&vma->vm_mm->page_table_lock);
        vma->vm_flags = newflags;
        spin_unlock(&vma->vm_mm->page_table_lock);
        return 0;
}

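/*
 * The request covers only the front of the VMA: allocate a new VMA "n"
 * for [vm_start, end) carrying the new flags, shift the original's
 * vm_pgoff and vm_start forward so it describes the remainder, and
 * link the new VMA into the mm under the usual locks.
 */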
static inline int mlock_fixup_start(struct vm_area_struct * vma,
        unsigned long end, int newflags)
{
        struct vm_area_struct * n;

        n = kmem_cache_alloc(vm_area_cachep, SLAB_KERNEL);
        if (!n)
                return -EAGAIN;
        *n = *vma;
        n->vm_end = end;
        n->vm_flags = newflags;
        n->vm_raend = 0;
        if (n->vm_file)
                get_file(n->vm_file);
        if (n->vm_ops && n->vm_ops->open)
                n->vm_ops->open(n);
        vma->vm_pgoff += (end - vma->vm_start) >> PAGE_SHIFT;
        lock_vma_mappings(vma);
        spin_lock(&vma->vm_mm->page_table_lock);
        vma->vm_start = end;
        __insert_vm_struct(current->mm, n);
        spin_unlock(&vma->vm_mm->page_table_lock);
        unlock_vma_mappings(vma);
        return 0;
}

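/*
 * Mirror image of mlock_fixup_start(): the request covers the tail of
 * the VMA, so the new VMA takes [start, vm_end) with the new flags and
 * the original is truncated to end at "start".
 */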
static inline int mlock_fixup_end(struct vm_area_struct * vma,
        unsigned long start, int newflags)
{
        struct vm_area_struct * n;

        n = kmem_cache_alloc(vm_area_cachep, SLAB_KERNEL);
        if (!n)
                return -EAGAIN;
        *n = *vma;
        n->vm_start = start;
        n->vm_pgoff += (n->vm_start - vma->vm_start) >> PAGE_SHIFT;
        n->vm_flags = newflags;
        n->vm_raend = 0;
        if (n->vm_file)
                get_file(n->vm_file);
        if (n->vm_ops && n->vm_ops->open)
                n->vm_ops->open(n);
        lock_vma_mappings(vma);
        spin_lock(&vma->vm_mm->page_table_lock);
        vma->vm_end = start;
        __insert_vm_struct(current->mm, n);
        spin_unlock(&vma->vm_mm->page_table_lock);
        unlock_vma_mappings(vma);
        return 0;
}

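/*
 * The request falls strictly inside the VMA: split it in three.  The
 * original VMA is reused for the middle [start, end) piece with the
 * new flags, while "left" and "right" keep the old flags.  Two new
 * VMAs now reference the backing file, hence the f_count bump by two.
 */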
static inline int mlock_fixup_middle(struct vm_area_struct * vma,
        unsigned long start, unsigned long end, int newflags)
{
        struct vm_area_struct * left, * right;

        left = kmem_cache_alloc(vm_area_cachep, SLAB_KERNEL);
        if (!left)
                return -EAGAIN;
        right = kmem_cache_alloc(vm_area_cachep, SLAB_KERNEL);
        if (!right) {
                kmem_cache_free(vm_area_cachep, left);
                return -EAGAIN;
        }
        *left = *vma;
        *right = *vma;
        left->vm_end = start;
        right->vm_start = end;
        right->vm_pgoff += (right->vm_start - left->vm_start) >> PAGE_SHIFT;
        vma->vm_flags = newflags;
        left->vm_raend = 0;
        right->vm_raend = 0;
        if (vma->vm_file)
                atomic_add(2, &vma->vm_file->f_count);

        if (vma->vm_ops && vma->vm_ops->open) {
                vma->vm_ops->open(left);
                vma->vm_ops->open(right);
        }
        vma->vm_raend = 0;
        vma->vm_pgoff += (start - vma->vm_start) >> PAGE_SHIFT;
        lock_vma_mappings(vma);
        spin_lock(&vma->vm_mm->page_table_lock);
        vma->vm_start = start;
        vma->vm_end = end;
        vma->vm_flags = newflags;
        __insert_vm_struct(current->mm, left);
        __insert_vm_struct(current->mm, right);
        spin_unlock(&vma->vm_mm->page_table_lock);
        unlock_vma_mappings(vma);
        return 0;
}

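/*
 * Apply a flag change to the [start, end) portion of one VMA, splitting
 * it as needed, then update the mm's locked-page accounting.  Note the
 * sign trick: "pages" is negated when locking, so the final
 * "locked_vm -= pages" grows the count on mlock and shrinks it on
 * munlock.  make_pages_present() faults the locked range in, so the
 * pages are resident before the call returns.
 */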
static int mlock_fixup(struct vm_area_struct * vma,
        unsigned long start, unsigned long end, unsigned int newflags)
{
        int pages, retval;

        if (newflags == vma->vm_flags)
                return 0;

        if (start == vma->vm_start) {
                if (end == vma->vm_end)
                        retval = mlock_fixup_all(vma, newflags);
                else
                        retval = mlock_fixup_start(vma, end, newflags);
        } else {
                if (end == vma->vm_end)
                        retval = mlock_fixup_end(vma, start, newflags);
                else
                        retval = mlock_fixup_middle(vma, start, end, newflags);
        }
        if (!retval) {
                /* keep track of amount of locked VM */
                pages = (end - start) >> PAGE_SHIFT;
                if (newflags & VM_LOCKED) {
                        pages = -pages;
                        make_pages_present(start, end);
                }
                vma->vm_mm->locked_vm -= pages;
        }
        return retval;
}

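/*
 * Walk every VMA covering [start, start + len) and set or clear
 * VM_LOCKED on each piece.  Locking requires CAP_IPC_LOCK, and any
 * hole in the address range aborts the walk with -ENOMEM.
 */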
static int do_mlock(unsigned long start, size_t len, int on)
{
        unsigned long nstart, end, tmp;
        struct vm_area_struct * vma, * next;
        int error;

        if (on && !capable(CAP_IPC_LOCK))
                return -EPERM;
        len = PAGE_ALIGN(len);
        end = start + len;
        if (end < start)
                return -EINVAL;
        if (end == start)
                return 0;
        vma = find_vma(current->mm, start);
        if (!vma || vma->vm_start > start)
                return -ENOMEM;

        for (nstart = start ; ; ) {
                unsigned int newflags;

                /* Here we know that vma->vm_start <= nstart < vma->vm_end. */

                newflags = vma->vm_flags | VM_LOCKED;
                if (!on)
                        newflags &= ~VM_LOCKED;

                if (vma->vm_end >= end) {
                        error = mlock_fixup(vma, nstart, end, newflags);
                        break;
                }

                tmp = vma->vm_end;
                next = vma->vm_next;
                error = mlock_fixup(vma, nstart, tmp, newflags);
                if (error)
                        break;
                nstart = tmp;
                vma = next;
                if (!vma || vma->vm_start != nstart) {
                        error = -ENOMEM;
                        break;
                }
        }
        return error;
}

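/*
 * mlock(2) entry point.  The range is page-aligned, then checked
 * against RLIMIT_MEMLOCK and against half of physical memory before
 * do_mlock() does the real work.  mmap_sem is held for writing because
 * VMAs may be split.
 */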
asmlinkage long sys_mlock(unsigned long start, size_t len)
{
        unsigned long locked;
        unsigned long lock_limit;
        int error = -ENOMEM;

        down_write(&current->mm->mmap_sem);
        len = PAGE_ALIGN(len + (start & ~PAGE_MASK));
        start &= PAGE_MASK;

        locked = len >> PAGE_SHIFT;
        locked += current->mm->locked_vm;

        lock_limit = current->rlim[RLIMIT_MEMLOCK].rlim_cur;
        lock_limit >>= PAGE_SHIFT;

        /* check against resource limits */
        if (locked > lock_limit)
                goto out;

        /* we may lock at most half of physical memory... */
        /* (this check is pretty bogus, but doesn't hurt) */
        if (locked > num_physpages/2)
                goto out;

        error = do_mlock(start, len, 1);
out:
        up_write(&current->mm->mmap_sem);
        return error;
}

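/*
 * munlock(2) entry point.  Unlocking needs no privilege or limit
 * checks, so this is just page alignment plus do_mlock(..., 0).
 */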
asmlinkage long sys_munlock(unsigned long start, size_t len)
{
        int ret;

        down_write(&current->mm->mmap_sem);
        len = PAGE_ALIGN(len + (start & ~PAGE_MASK));
        start &= PAGE_MASK;
        ret = do_mlock(start, len, 0);
        up_write(&current->mm->mmap_sem);
        return ret;
}

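/*
 * Back end for mlockall(2) and munlockall(2).  MCL_FUTURE is recorded
 * in mm->def_flags so future mappings inherit VM_LOCKED; MCL_CURRENT
 * (or a zero "flags" from munlockall) is applied to every existing VMA
 * via mlock_fixup().
 */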
static int do_mlockall(int flags)
{
        int error;
        unsigned int def_flags;
        struct vm_area_struct * vma;

        if (!capable(CAP_IPC_LOCK))
                return -EPERM;

        def_flags = 0;
        if (flags & MCL_FUTURE)
                def_flags = VM_LOCKED;
        current->mm->def_flags = def_flags;

        error = 0;
        for (vma = current->mm->mmap; vma ; vma = vma->vm_next) {
                unsigned int newflags;

                newflags = vma->vm_flags | VM_LOCKED;
                if (!(flags & MCL_CURRENT))
                        newflags &= ~VM_LOCKED;
                error = mlock_fixup(vma, vma->vm_start, vma->vm_end, newflags);
                if (error)
                        break;
        }
        return error;
}

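/*
 * mlockall(2) entry point: validate the flag mask, then apply the same
 * RLIMIT_MEMLOCK and half-of-memory checks as sys_mlock(), this time
 * against the process's entire address space (total_vm).
 */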
asmlinkage long sys_mlockall(int flags)
{
        unsigned long lock_limit;
        int ret = -EINVAL;

        down_write(&current->mm->mmap_sem);
        if (!flags || (flags & ~(MCL_CURRENT | MCL_FUTURE)))
                goto out;

        lock_limit = current->rlim[RLIMIT_MEMLOCK].rlim_cur;
        lock_limit >>= PAGE_SHIFT;

        ret = -ENOMEM;
        if (current->mm->total_vm > lock_limit)
                goto out;

        /* we may lock at most half of physical memory... */
        /* (this check is pretty bogus, but doesn't hurt) */
        if (current->mm->total_vm > num_physpages/2)
                goto out;

        ret = do_mlockall(flags);
out:
        up_write(&current->mm->mmap_sem);
        return ret;
}

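/*
 * munlockall(2) entry point: do_mlockall(0) clears mm->def_flags and
 * strips VM_LOCKED from every VMA.
 */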
asmlinkage long sys_munlockall(void)
{
        int ret;

        down_write(&current->mm->mmap_sem);
        ret = do_mlockall(0);
        up_write(&current->mm->mmap_sem);
        return ret;
}
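
For reference, a minimal userspace sketch that exercises the sys_mlock()
and sys_munlock() paths above (illustrative only: the four-page buffer
size and the error handling are arbitrary choices, not part of the
kernel source):

#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <sys/mman.h>
#include <unistd.h>

int main(void)
{
        long page = sysconf(_SC_PAGESIZE);
        size_t len = 4 * (size_t)page;  /* arbitrary: four pages */
        char *buf = malloc(len);

        if (buf == NULL)
                return 1;

        /* In this kernel version, locking requires CAP_IPC_LOCK and
         * counts against RLIMIT_MEMLOCK (see sys_mlock() above). */
        if (mlock(buf, len) != 0) {
                perror("mlock");
                return 1;
        }

        /* The range is now resident: make_pages_present() faulted it
         * in, so touching it cannot cause a major fault. */
        memset(buf, 0, len);

        if (munlock(buf, len) != 0)
                perror("munlock");
        free(buf);
        return 0;
}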

This page is part of the FreeBSD/Linux Kernel Cross-Reference and was automatically generated using a modified version of the LXR engine.