
FreeBSD/Linux Kernel Cross Reference
sys/mm/mprotect.c


/*
 *      linux/mm/mprotect.c
 *
 *  (C) Copyright 1994 Linus Torvalds
 */
#include <linux/slab.h>
#include <linux/smp_lock.h>
#include <linux/shm.h>
#include <linux/mman.h>

#include <asm/uaccess.h>
#include <asm/pgalloc.h>
#include <asm/pgtable.h>

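/*
 * Walk the page-table entries covered by one pmd entry and switch
 * every present pte in [address, address + size) over to 'newprot'.
 * The range is clamped to the current pmd; the caller iterates
 * across pmds.
 */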
static inline void change_pte_range(pmd_t * pmd, unsigned long address,
        unsigned long size, pgprot_t newprot)
{
        pte_t * pte;
        unsigned long end;

        if (pmd_none(*pmd))
                return;
        if (pmd_bad(*pmd)) {
                pmd_ERROR(*pmd);
                pmd_clear(pmd);
                return;
        }
        pte = pte_offset(pmd, address);
        address &= ~PMD_MASK;
        end = address + size;
        if (end > PMD_SIZE)
                end = PMD_SIZE;
        do {
                if (pte_present(*pte)) {
                        pte_t entry;

                        /* Avoid an SMP race with hardware updated dirty/clean
                         * bits by wiping the pte and then setting the new pte
                         * into place.
                         */
                        entry = ptep_get_and_clear(pte);
                        set_pte(pte, pte_modify(entry, newprot));
                }
                address += PAGE_SIZE;
                pte++;
        } while (address && (address < end));
}

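/*
 * Walk the pmd entries under one page-directory entry, handing each
 * pmd's share of the range to change_pte_range().
 */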
static inline void change_pmd_range(pgd_t * pgd, unsigned long address,
        unsigned long size, pgprot_t newprot)
{
        pmd_t * pmd;
        unsigned long end;

        if (pgd_none(*pgd))
                return;
        if (pgd_bad(*pgd)) {
                pgd_ERROR(*pgd);
                pgd_clear(pgd);
                return;
        }
        pmd = pmd_offset(pgd, address);
        address &= ~PGDIR_MASK;
        end = address + size;
        if (end > PGDIR_SIZE)
                end = PGDIR_SIZE;
        do {
                change_pte_range(pmd, address, end - address, newprot);
                address = (address + PMD_SIZE) & PMD_MASK;
                pmd++;
        } while (address && (address < end));
}

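/*
 * Rewrite the page-table protections for [start, end) in the current
 * process: flush the cache first, update the entries under the
 * page_table_lock, then flush the TLB so no stale translations keep
 * the old permissions.
 */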
static void change_protection(unsigned long start, unsigned long end, pgprot_t newprot)
{
        pgd_t *dir;
        unsigned long beg = start;

        dir = pgd_offset(current->mm, start);
        flush_cache_range(current->mm, beg, end);
        if (start >= end)
                BUG();
        spin_lock(&current->mm->page_table_lock);
        do {
                change_pmd_range(dir, start, end - start, newprot);
                start = (start + PGDIR_SIZE) & PGDIR_MASK;
                dir++;
        } while (start && (start < end));
        spin_unlock(&current->mm->page_table_lock);
        flush_tlb_range(current->mm, beg, end);
        return;
}

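/*
 * The new protection covers the whole vma: either merge the vma into
 * a compatible neighbour that precedes it, or simply update its flags
 * and protection in place.
 */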
static inline int mprotect_fixup_all(struct vm_area_struct * vma, struct vm_area_struct ** pprev,
        int newflags, pgprot_t prot)
{
        struct vm_area_struct * prev = *pprev;
        struct mm_struct * mm = vma->vm_mm;

        if (prev && prev->vm_end == vma->vm_start && can_vma_merge(prev, newflags) &&
            !vma->vm_file && !(vma->vm_flags & VM_SHARED)) {
                spin_lock(&mm->page_table_lock);
                prev->vm_end = vma->vm_end;
                __vma_unlink(mm, vma, prev);
                spin_unlock(&mm->page_table_lock);

                kmem_cache_free(vm_area_cachep, vma);
                mm->map_count--;

                return 0;
        }

        spin_lock(&mm->page_table_lock);
        vma->vm_flags = newflags;
        vma->vm_page_prot = prot;
        spin_unlock(&mm->page_table_lock);

        *pprev = vma;

        return 0;
}

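/*
 * The new protection covers the head of the vma.  Grow a mergeable
 * predecessor over the head if possible; otherwise split off a new
 * vma carrying the new flags and shrink the original to start at
 * 'end'.
 */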
static inline int mprotect_fixup_start(struct vm_area_struct * vma, struct vm_area_struct ** pprev,
        unsigned long end,
        int newflags, pgprot_t prot)
{
        struct vm_area_struct * n, * prev = *pprev;

        *pprev = vma;

        if (prev && prev->vm_end == vma->vm_start && can_vma_merge(prev, newflags) &&
            !vma->vm_file && !(vma->vm_flags & VM_SHARED)) {
                spin_lock(&vma->vm_mm->page_table_lock);
                prev->vm_end = end;
                vma->vm_start = end;
                spin_unlock(&vma->vm_mm->page_table_lock);

                return 0;
        }
        n = kmem_cache_alloc(vm_area_cachep, SLAB_KERNEL);
        if (!n)
                return -ENOMEM;
        *n = *vma;
        n->vm_end = end;
        n->vm_flags = newflags;
        n->vm_raend = 0;
        n->vm_page_prot = prot;
        if (n->vm_file)
                get_file(n->vm_file);
        if (n->vm_ops && n->vm_ops->open)
                n->vm_ops->open(n);
        vma->vm_pgoff += (end - vma->vm_start) >> PAGE_SHIFT;
        lock_vma_mappings(vma);
        spin_lock(&vma->vm_mm->page_table_lock);
        vma->vm_start = end;
        __insert_vm_struct(current->mm, n);
        spin_unlock(&vma->vm_mm->page_table_lock);
        unlock_vma_mappings(vma);

        return 0;
}

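/*
 * The new protection covers the tail of the vma: split off a new vma
 * for [start, vma->vm_end) with the new flags and truncate the
 * original to end at 'start'.
 */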
static inline int mprotect_fixup_end(struct vm_area_struct * vma, struct vm_area_struct ** pprev,
        unsigned long start,
        int newflags, pgprot_t prot)
{
        struct vm_area_struct * n;

        n = kmem_cache_alloc(vm_area_cachep, GFP_KERNEL);
        if (!n)
                return -ENOMEM;
        *n = *vma;
        n->vm_start = start;
        n->vm_pgoff += (n->vm_start - vma->vm_start) >> PAGE_SHIFT;
        n->vm_flags = newflags;
        n->vm_raend = 0;
        n->vm_page_prot = prot;
        if (n->vm_file)
                get_file(n->vm_file);
        if (n->vm_ops && n->vm_ops->open)
                n->vm_ops->open(n);
        lock_vma_mappings(vma);
        spin_lock(&vma->vm_mm->page_table_lock);
        vma->vm_end = start;
        __insert_vm_struct(current->mm, n);
        spin_unlock(&vma->vm_mm->page_table_lock);
        unlock_vma_mappings(vma);

        *pprev = n;

        return 0;
}

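/*
 * The new protection covers an interior slice of the vma.  Split it
 * in three: the original vma is reused for the middle piece and gets
 * the new flags, while freshly allocated vmas keep the old flags on
 * the left and right remainders (hence the file refcount is bumped
 * by two).
 */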
static inline int mprotect_fixup_middle(struct vm_area_struct * vma, struct vm_area_struct ** pprev,
        unsigned long start, unsigned long end,
        int newflags, pgprot_t prot)
{
        struct vm_area_struct * left, * right;

        left = kmem_cache_alloc(vm_area_cachep, SLAB_KERNEL);
        if (!left)
                return -ENOMEM;
        right = kmem_cache_alloc(vm_area_cachep, SLAB_KERNEL);
        if (!right) {
                kmem_cache_free(vm_area_cachep, left);
                return -ENOMEM;
        }
        *left = *vma;
        *right = *vma;
        left->vm_end = start;
        right->vm_start = end;
        right->vm_pgoff += (right->vm_start - left->vm_start) >> PAGE_SHIFT;
        left->vm_raend = 0;
        right->vm_raend = 0;
        if (vma->vm_file)
                atomic_add(2,&vma->vm_file->f_count);
        if (vma->vm_ops && vma->vm_ops->open) {
                vma->vm_ops->open(left);
                vma->vm_ops->open(right);
        }
        vma->vm_pgoff += (start - vma->vm_start) >> PAGE_SHIFT;
        vma->vm_raend = 0;
        vma->vm_page_prot = prot;
        lock_vma_mappings(vma);
        spin_lock(&vma->vm_mm->page_table_lock);
        vma->vm_start = start;
        vma->vm_end = end;
        vma->vm_flags = newflags;
        __insert_vm_struct(current->mm, left);
        __insert_vm_struct(current->mm, right);
        spin_unlock(&vma->vm_mm->page_table_lock);
        unlock_vma_mappings(vma);

        *pprev = right;

        return 0;
}

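/*
 * Pick the right fixup helper according to how [start, end) lines up
 * with the vma boundaries, then push the new protection down into the
 * page tables.
 */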
static int mprotect_fixup(struct vm_area_struct * vma, struct vm_area_struct ** pprev,
        unsigned long start, unsigned long end, unsigned int newflags)
{
        pgprot_t newprot;
        int error;

        if (newflags == vma->vm_flags) {
                *pprev = vma;
                return 0;
        }
        newprot = protection_map[newflags & 0xf];
        if (start == vma->vm_start) {
                if (end == vma->vm_end)
                        error = mprotect_fixup_all(vma, pprev, newflags, newprot);
                else
                        error = mprotect_fixup_start(vma, pprev, end, newflags, newprot);
        } else if (end == vma->vm_end)
                error = mprotect_fixup_end(vma, pprev, start, newflags, newprot);
        else
                error = mprotect_fixup_middle(vma, pprev, start, end, newflags, newprot);

        if (error)
                return error;

        change_protection(start, end, newprot);
        return 0;
}

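/*
 * The mprotect(2) system call: validate the arguments, then walk all
 * vmas overlapping [start, start + len), checking the requested bits
 * against each vma's VM_MAY* rights (the 'newflags >> 4' test) and
 * applying the change piecewise.  A hole in the mapping yields
 * -ENOMEM; finally the last fixed-up vma is merged with its successor
 * when possible.
 */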
asmlinkage long sys_mprotect(unsigned long start, size_t len, unsigned long prot)
{
        unsigned long nstart, end, tmp;
        struct vm_area_struct * vma, * next, * prev;
        int error = -EINVAL;

        if (start & ~PAGE_MASK)
                return -EINVAL;
        len = PAGE_ALIGN(len);
        end = start + len;
        if (end < start)
                return -EINVAL;
        if (prot & ~(PROT_READ | PROT_WRITE | PROT_EXEC))
                return -EINVAL;
        if (end == start)
                return 0;

        down_write(&current->mm->mmap_sem);

        vma = find_vma_prev(current->mm, start, &prev);
        error = -ENOMEM;
        if (!vma || vma->vm_start > start)
                goto out;

        for (nstart = start ; ; ) {
                unsigned int newflags;
                int last = 0;

                /* Here we know that vma->vm_start <= nstart < vma->vm_end. */

                newflags = prot | (vma->vm_flags & ~(PROT_READ | PROT_WRITE | PROT_EXEC));
                if ((newflags & ~(newflags >> 4)) & 0xf) {
                        error = -EACCES;
                        goto out;
                }

                if (vma->vm_end > end) {
                        error = mprotect_fixup(vma, &prev, nstart, end, newflags);
                        goto out;
                }
                if (vma->vm_end == end)
                        last = 1;

                tmp = vma->vm_end;
                next = vma->vm_next;
                error = mprotect_fixup(vma, &prev, nstart, tmp, newflags);
                if (error)
                        goto out;
                if (last)
                        break;
                nstart = tmp;
                vma = next;
                if (!vma || vma->vm_start != nstart) {
                        error = -ENOMEM;
                        goto out;
                }
        }
        if (next && prev->vm_end == next->vm_start && can_vma_merge(next, prev->vm_flags) &&
            !prev->vm_file && !(prev->vm_flags & VM_SHARED)) {
                spin_lock(&prev->vm_mm->page_table_lock);
                prev->vm_end = next->vm_end;
                __vma_unlink(prev->vm_mm, next, prev);
                spin_unlock(&prev->vm_mm->page_table_lock);

                kmem_cache_free(vm_area_cachep, next);
                prev->vm_mm->map_count--;
        }
out:
        up_write(&current->mm->mmap_sem);
        return error;
}
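
For context, here is a minimal userspace sketch of the path into sys_mprotect() above: it maps an anonymous read/write page with mmap(2), then drops the write permission with mprotect(2). This is plain POSIX usage, not part of the kernel source; it exercises the page-alignment and PROT_READ|PROT_WRITE|PROT_EXEC argument checks at the top of sys_mprotect().

#include <stdio.h>
#include <string.h>
#include <sys/mman.h>
#include <unistd.h>

int main(void)
{
        long page = sysconf(_SC_PAGESIZE);
        char *p = mmap(NULL, page, PROT_READ | PROT_WRITE,
                       MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
        if (p == MAP_FAILED) {
                perror("mmap");
                return 1;
        }
        strcpy(p, "hello");             /* page is writable here */

        /* Enters sys_mprotect(): start is page-aligned and prot is
         * within PROT_READ | PROT_WRITE | PROT_EXEC, so the kernel
         * splits or updates the backing vma and calls
         * change_protection() on the range. */
        if (mprotect(p, page, PROT_READ) != 0) {
                perror("mprotect");
                return 1;
        }
        printf("%s\n", p);              /* reading is still allowed */
        /* *p = 'x' would now fault with SIGSEGV. */
        munmap(p, page);
        return 0;
}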



This page is part of the FreeBSD/Linux Kernel Cross-Reference, and was automatically generated using a modified version of the LXR engine.