FreeBSD/Linux Kernel Cross Reference
sys/fs/exec.c


/*
 *  linux/fs/exec.c
 *
 *  Copyright (C) 1991, 1992  Linus Torvalds
 */

/*
 * #!-checking implemented by tytso.
 */
/*
 * Demand-loading implemented 01.12.91 - no need to read anything but
 * the header into memory. The inode of the executable is put into
 * "current->executable", and page faults do the actual loading. Clean.
 *
 * Once more I can proudly say that linux stood up to being changed: it
 * was less than 2 hours work to get demand-loading completely implemented.
 *
 * Demand loading changed July 1993 by Eric Youngdale. Use mmap instead,
 * current->executable is only used by the procfs. This allows a dispatch
 * table to check for several different types of binary formats. We keep
 * trying until we recognize the file or we run out of supported binary
 * formats.
 */
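
/*
 * Illustration (not part of this file; fenced off with #if 0 so it stays
 * out of any build): the binfmt dispatch described above is what lets
 * execve() run ELF binaries and "#!" scripts alike. When no handler
 * recognizes the image, execve() fails with ENOEXEC, and shells
 * traditionally retry the file via /bin/sh. A minimal userspace sketch of
 * that convention; "./prog" is a placeholder path:
 */
#if 0
#include <errno.h>
#include <stdio.h>
#include <unistd.h>

static void run(const char *path, char *const argv[], char *const envp[])
{
        execve(path, argv, envp);       /* returns only on failure */
        if (errno == ENOEXEC) {
                /* No binfmt matched: retry the file as a shell script. */
                char *const shargv[] = { "sh", (char *)path, NULL };
                execve("/bin/sh", shargv, envp);
        }
        perror("execve");
}

int main(void)
{
        char *const argv[] = { "prog", NULL };
        char *const envp[] = { NULL };

        run("./prog", argv, envp);
        return 1;                       /* reached only if both execs failed */
}
#endif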

#include <linux/slab.h>
#include <linux/file.h>
#include <linux/fdtable.h>
#include <linux/mm.h>
#include <linux/stat.h>
#include <linux/fcntl.h>
#include <linux/swap.h>
#include <linux/string.h>
#include <linux/init.h>
#include <linux/pagemap.h>
#include <linux/perf_event.h>
#include <linux/highmem.h>
#include <linux/spinlock.h>
#include <linux/key.h>
#include <linux/personality.h>
#include <linux/binfmts.h>
#include <linux/utsname.h>
#include <linux/pid_namespace.h>
#include <linux/module.h>
#include <linux/namei.h>
#include <linux/mount.h>
#include <linux/security.h>
#include <linux/syscalls.h>
#include <linux/tsacct_kern.h>
#include <linux/cn_proc.h>
#include <linux/audit.h>
#include <linux/tracehook.h>
#include <linux/kmod.h>
#include <linux/fsnotify.h>
#include <linux/fs_struct.h>
#include <linux/pipe_fs_i.h>
#include <linux/oom.h>
#include <linux/compat.h>

#include <asm/uaccess.h>
#include <asm/mmu_context.h>
#include <asm/tlb.h>

#include <trace/events/task.h>
#include "internal.h"
#include "coredump.h"

#include <trace/events/sched.h>

int suid_dumpable = 0;

static LIST_HEAD(formats);
static DEFINE_RWLOCK(binfmt_lock);

void __register_binfmt(struct linux_binfmt * fmt, int insert)
{
        BUG_ON(!fmt);
        write_lock(&binfmt_lock);
        insert ? list_add(&fmt->lh, &formats) :
                 list_add_tail(&fmt->lh, &formats);
        write_unlock(&binfmt_lock);
}

EXPORT_SYMBOL(__register_binfmt);

void unregister_binfmt(struct linux_binfmt * fmt)
{
        write_lock(&binfmt_lock);
        list_del(&fmt->lh);
        write_unlock(&binfmt_lock);
}

EXPORT_SYMBOL(unregister_binfmt);

static inline void put_binfmt(struct linux_binfmt * fmt)
{
        module_put(fmt->module);
}
/*
 * Note that a shared library must be both readable and executable for
 * security reasons.
 *
 * Also note that we take the load address from the file itself.
 */
SYSCALL_DEFINE1(uselib, const char __user *, library)
{
        struct file *file;
        struct filename *tmp = getname(library);
        int error = PTR_ERR(tmp);
        static const struct open_flags uselib_flags = {
                .open_flag = O_LARGEFILE | O_RDONLY | __FMODE_EXEC,
                .acc_mode = MAY_READ | MAY_EXEC | MAY_OPEN,
                .intent = LOOKUP_OPEN
        };

        if (IS_ERR(tmp))
                goto out;

        file = do_filp_open(AT_FDCWD, tmp, &uselib_flags, LOOKUP_FOLLOW);
        putname(tmp);
        error = PTR_ERR(file);
        if (IS_ERR(file))
                goto out;

        error = -EINVAL;
        if (!S_ISREG(file->f_path.dentry->d_inode->i_mode))
                goto exit;

        error = -EACCES;
        if (file->f_path.mnt->mnt_flags & MNT_NOEXEC)
                goto exit;

        fsnotify_open(file);

        error = -ENOEXEC;
        if (file->f_op) {
                struct linux_binfmt * fmt;

                read_lock(&binfmt_lock);
                list_for_each_entry(fmt, &formats, lh) {
                        if (!fmt->load_shlib)
                                continue;
                        if (!try_module_get(fmt->module))
                                continue;
                        read_unlock(&binfmt_lock);
                        error = fmt->load_shlib(file);
                        read_lock(&binfmt_lock);
                        put_binfmt(fmt);
                        if (error != -ENOEXEC)
                                break;
                }
                read_unlock(&binfmt_lock);
        }
exit:
        fput(file);
out:
        return error;
}

#ifdef CONFIG_MMU
/*
 * The nascent bprm->mm is not visible until exec_mmap(), but it can
 * use a lot of memory, so account these pages in current->mm temporarily
 * for oom_badness()->get_mm_rss(). Once exec succeeds or fails, we
 * change the counter back via acct_arg_size(0).
 */
static void acct_arg_size(struct linux_binprm *bprm, unsigned long pages)
{
        struct mm_struct *mm = current->mm;
        long diff = (long)(pages - bprm->vma_pages);

        if (!mm || !diff)
                return;

        bprm->vma_pages = pages;
        add_mm_counter(mm, MM_ANONPAGES, diff);
}

static struct page *get_arg_page(struct linux_binprm *bprm, unsigned long pos,
                int write)
{
        struct page *page;
        int ret;

#ifdef CONFIG_STACK_GROWSUP
        if (write) {
                ret = expand_downwards(bprm->vma, pos);
                if (ret < 0)
                        return NULL;
        }
#endif
        ret = get_user_pages(current, bprm->mm, pos,
                        1, write, 1, &page, NULL);
        if (ret <= 0)
                return NULL;

        if (write) {
                unsigned long size = bprm->vma->vm_end - bprm->vma->vm_start;
                struct rlimit *rlim;

                acct_arg_size(bprm, size / PAGE_SIZE);

                /*
                 * We've historically supported up to 32 pages (ARG_MAX)
                 * of argument strings even with small stacks
                 */
                if (size <= ARG_MAX)
                        return page;

                /*
                 * Limit to 1/4-th the stack size for the argv+env strings.
                 * This ensures that:
                 *  - the remaining binfmt code will not run out of stack space,
                 *  - the program will have a reasonable amount of stack left
                 *    to work from.
                 */
                rlim = current->signal->rlim;
                if (size > ACCESS_ONCE(rlim[RLIMIT_STACK].rlim_cur) / 4) {
                        put_page(page);
                        return NULL;
                }
        }

        return page;
}
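
/*
 * Illustration (not part of this file; fenced off with #if 0): the cap
 * enforced above -- argument strings may use ARG_MAX bytes, or a quarter
 * of RLIMIT_STACK when that is larger -- can be recomputed from userspace.
 * A minimal sketch, assuming a finite RLIMIT_STACK (RLIM_INFINITY is not
 * handled):
 */
#if 0
#include <stdio.h>
#include <sys/resource.h>

#ifndef ARG_MAX
#define ARG_MAX 131072                  /* the historical 32 x 4 KiB pages */
#endif

int main(void)
{
        struct rlimit rl;
        unsigned long cap;

        if (getrlimit(RLIMIT_STACK, &rl) != 0)
                return 1;
        cap = (unsigned long)rl.rlim_cur / 4;   /* the 1/4 rule above */
        if (cap < ARG_MAX)
                cap = ARG_MAX;                  /* ARG_MAX floor */
        printf("approx. argv+env byte limit: %lu\n", cap);
        return 0;
}
#endif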

static void put_arg_page(struct page *page)
{
        put_page(page);
}

static void free_arg_page(struct linux_binprm *bprm, int i)
{
}

static void free_arg_pages(struct linux_binprm *bprm)
{
}

static void flush_arg_page(struct linux_binprm *bprm, unsigned long pos,
                struct page *page)
{
        flush_cache_page(bprm->vma, pos, page_to_pfn(page));
}

static int __bprm_mm_init(struct linux_binprm *bprm)
{
        int err;
        struct vm_area_struct *vma = NULL;
        struct mm_struct *mm = bprm->mm;

        bprm->vma = vma = kmem_cache_zalloc(vm_area_cachep, GFP_KERNEL);
        if (!vma)
                return -ENOMEM;

        down_write(&mm->mmap_sem);
        vma->vm_mm = mm;

        /*
         * Place the stack at the largest stack address the architecture
         * supports. Later, we'll move this to an appropriate place. We don't
         * use STACK_TOP because that can depend on attributes which aren't
         * configured yet.
         */
        BUILD_BUG_ON(VM_STACK_FLAGS & VM_STACK_INCOMPLETE_SETUP);
        vma->vm_end = STACK_TOP_MAX;
        vma->vm_start = vma->vm_end - PAGE_SIZE;
        vma->vm_flags = VM_STACK_FLAGS | VM_STACK_INCOMPLETE_SETUP;
        vma->vm_page_prot = vm_get_page_prot(vma->vm_flags);
        INIT_LIST_HEAD(&vma->anon_vma_chain);

        err = insert_vm_struct(mm, vma);
        if (err)
                goto err;

        mm->stack_vm = mm->total_vm = 1;
        up_write(&mm->mmap_sem);
        bprm->p = vma->vm_end - sizeof(void *);
        return 0;
err:
        up_write(&mm->mmap_sem);
        bprm->vma = NULL;
        kmem_cache_free(vm_area_cachep, vma);
        return err;
}

static bool valid_arg_len(struct linux_binprm *bprm, long len)
{
        return len <= MAX_ARG_STRLEN;
}

#else

static inline void acct_arg_size(struct linux_binprm *bprm, unsigned long pages)
{
}

static struct page *get_arg_page(struct linux_binprm *bprm, unsigned long pos,
                int write)
{
        struct page *page;

        page = bprm->page[pos / PAGE_SIZE];
        if (!page && write) {
                page = alloc_page(GFP_HIGHUSER|__GFP_ZERO);
                if (!page)
                        return NULL;
                bprm->page[pos / PAGE_SIZE] = page;
        }

        return page;
}

static void put_arg_page(struct page *page)
{
}

static void free_arg_page(struct linux_binprm *bprm, int i)
{
        if (bprm->page[i]) {
                __free_page(bprm->page[i]);
                bprm->page[i] = NULL;
        }
}

static void free_arg_pages(struct linux_binprm *bprm)
{
        int i;

        for (i = 0; i < MAX_ARG_PAGES; i++)
                free_arg_page(bprm, i);
}

static void flush_arg_page(struct linux_binprm *bprm, unsigned long pos,
                struct page *page)
{
}

static int __bprm_mm_init(struct linux_binprm *bprm)
{
        bprm->p = PAGE_SIZE * MAX_ARG_PAGES - sizeof(void *);
        return 0;
}

static bool valid_arg_len(struct linux_binprm *bprm, long len)
{
        return len <= bprm->p;
}

#endif /* CONFIG_MMU */

/*
 * Create a new mm_struct and populate it with a temporary stack
 * vm_area_struct.  We don't have enough context at this point to set the stack
 * flags, permissions, and offset, so we use temporary values.  We'll update
 * them later in setup_arg_pages().
 */
int bprm_mm_init(struct linux_binprm *bprm)
{
        int err;
        struct mm_struct *mm = NULL;

        bprm->mm = mm = mm_alloc();
        err = -ENOMEM;
        if (!mm)
                goto err;

        err = init_new_context(current, mm);
        if (err)
                goto err;

        err = __bprm_mm_init(bprm);
        if (err)
                goto err;

        return 0;

err:
        if (mm) {
                bprm->mm = NULL;
                mmdrop(mm);
        }

        return err;
}

struct user_arg_ptr {
#ifdef CONFIG_COMPAT
        bool is_compat;
#endif
        union {
                const char __user *const __user *native;
#ifdef CONFIG_COMPAT
                const compat_uptr_t __user *compat;
#endif
        } ptr;
};

static const char __user *get_user_arg_ptr(struct user_arg_ptr argv, int nr)
{
        const char __user *native;

#ifdef CONFIG_COMPAT
        if (unlikely(argv.is_compat)) {
                compat_uptr_t compat;

                if (get_user(compat, argv.ptr.compat + nr))
                        return ERR_PTR(-EFAULT);

                return compat_ptr(compat);
        }
#endif

        if (get_user(native, argv.ptr.native + nr))
                return ERR_PTR(-EFAULT);

        return native;
}

/*
 * count() counts the number of strings in array ARGV.
 */
static int count(struct user_arg_ptr argv, int max)
{
        int i = 0;

        if (argv.ptr.native != NULL) {
                for (;;) {
                        const char __user *p = get_user_arg_ptr(argv, i);

                        if (!p)
                                break;

                        if (IS_ERR(p))
                                return -EFAULT;

                        if (i >= max)
                                return -E2BIG;
                        ++i;

                        if (fatal_signal_pending(current))
                                return -ERESTARTNOHAND;
                        cond_resched();
                }
        }
        return i;
}
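
/*
 * Illustration (not part of this file; fenced off with #if 0): count()
 * above is the kernel side of walking a NULL-terminated argv[]. A
 * userspace analogue with the same -E2BIG-style cap looks like this
 * (sketch only):
 */
#if 0
#include <stdio.h>

static int count_strings(const char *const *v, int max)
{
        int i = 0;

        if (v) {
                while (v[i]) {
                        if (i >= max)
                                return -1;      /* the kernel returns -E2BIG */
                        i++;
                }
        }
        return i;
}

int main(void)
{
        const char *argv[] = { "ls", "-l", NULL };

        printf("%d\n", count_strings(argv, 0x7fffffff));        /* prints 2 */
        return 0;
}
#endif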

/*
 * 'copy_strings()' copies argument/environment strings from the old
 * process's memory to the new process's stack.  The call to get_user_pages()
 * ensures the destination page is created and not swapped out.
 */
static int copy_strings(int argc, struct user_arg_ptr argv,
                        struct linux_binprm *bprm)
{
        struct page *kmapped_page = NULL;
        char *kaddr = NULL;
        unsigned long kpos = 0;
        int ret;

        while (argc-- > 0) {
                const char __user *str;
                int len;
                unsigned long pos;

                ret = -EFAULT;
                str = get_user_arg_ptr(argv, argc);
                if (IS_ERR(str))
                        goto out;

                len = strnlen_user(str, MAX_ARG_STRLEN);
                if (!len)
                        goto out;

                ret = -E2BIG;
                if (!valid_arg_len(bprm, len))
                        goto out;

                /* We're going to work our way backwards. */
                pos = bprm->p;
                str += len;
                bprm->p -= len;

                while (len > 0) {
                        int offset, bytes_to_copy;

                        if (fatal_signal_pending(current)) {
                                ret = -ERESTARTNOHAND;
                                goto out;
                        }
                        cond_resched();

                        offset = pos % PAGE_SIZE;
                        if (offset == 0)
                                offset = PAGE_SIZE;

                        bytes_to_copy = offset;
                        if (bytes_to_copy > len)
                                bytes_to_copy = len;

                        offset -= bytes_to_copy;
                        pos -= bytes_to_copy;
                        str -= bytes_to_copy;
                        len -= bytes_to_copy;

                        if (!kmapped_page || kpos != (pos & PAGE_MASK)) {
                                struct page *page;

                                page = get_arg_page(bprm, pos, 1);
                                if (!page) {
                                        ret = -E2BIG;
                                        goto out;
                                }

                                if (kmapped_page) {
                                        flush_kernel_dcache_page(kmapped_page);
                                        kunmap(kmapped_page);
                                        put_arg_page(kmapped_page);
                                }
                                kmapped_page = page;
                                kaddr = kmap(kmapped_page);
                                kpos = pos & PAGE_MASK;
                                flush_arg_page(bprm, kpos, kmapped_page);
                        }
                        if (copy_from_user(kaddr+offset, str, bytes_to_copy)) {
                                ret = -EFAULT;
                                goto out;
                        }
                }
        }
        ret = 0;
out:
        if (kmapped_page) {
                flush_kernel_dcache_page(kmapped_page);
                kunmap(kmapped_page);
                put_arg_page(kmapped_page);
        }
        return ret;
}
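
/*
 * Illustration (not part of this file; fenced off with #if 0):
 * copy_strings() fills the new stack from the top down, so bprm->p only
 * ever decreases. A compact userspace model of that layout, using a plain
 * buffer instead of pinned pages (sketch only):
 */
#if 0
#include <stdio.h>
#include <string.h>

#define STACK_BYTES 4096

int main(void)
{
        static char stack[STACK_BYTES];
        size_t p = STACK_BYTES;                 /* mimics bprm->p */
        const char *argv[] = { "ls", "-l", NULL };
        int argc = 2;

        while (argc-- > 0) {                    /* last string lands deepest */
                size_t len = strlen(argv[argc]) + 1;

                p -= len;
                memcpy(&stack[p], argv[argc], len);
        }
        printf("strings start at offset %zu: \"%s\"\n", p, &stack[p]);
        return 0;
}
#endif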

/*
 * Like copy_strings, but get argv and its values from kernel memory.
 */
int copy_strings_kernel(int argc, const char *const *__argv,
                        struct linux_binprm *bprm)
{
        int r;
        mm_segment_t oldfs = get_fs();
        struct user_arg_ptr argv = {
                .ptr.native = (const char __user *const __user *)__argv,
        };

        set_fs(KERNEL_DS);
        r = copy_strings(argc, argv, bprm);
        set_fs(oldfs);

        return r;
}
EXPORT_SYMBOL(copy_strings_kernel);

#ifdef CONFIG_MMU

/*
 * During bprm_mm_init(), we create a temporary stack at STACK_TOP_MAX.  Once
 * the binfmt code determines where the new stack should reside, we shift it to
 * its final location.  The process proceeds as follows:
 *
 * 1) Use shift to calculate the new vma endpoints.
 * 2) Extend vma to cover both the old and new ranges.  This ensures the
 *    arguments passed to subsequent functions are consistent.
 * 3) Move vma's page tables to the new range.
 * 4) Free up any cleared pgd range.
 * 5) Shrink the vma to cover only the new range.
 */
static int shift_arg_pages(struct vm_area_struct *vma, unsigned long shift)
{
        struct mm_struct *mm = vma->vm_mm;
        unsigned long old_start = vma->vm_start;
        unsigned long old_end = vma->vm_end;
        unsigned long length = old_end - old_start;
        unsigned long new_start = old_start - shift;
        unsigned long new_end = old_end - shift;
        struct mmu_gather tlb;

        BUG_ON(new_start > new_end);

        /*
         * ensure there are no vmas between where we want to go
         * and where we are
         */
        if (vma != find_vma(mm, new_start))
                return -EFAULT;

        /*
         * cover the whole range: [new_start, old_end)
         */
        if (vma_adjust(vma, new_start, old_end, vma->vm_pgoff, NULL))
                return -ENOMEM;

        /*
         * move the page tables downwards, on failure we rely on
         * process cleanup to remove whatever mess we made.
         */
        if (length != move_page_tables(vma, old_start,
                                       vma, new_start, length, false))
                return -ENOMEM;

        lru_add_drain();
        tlb_gather_mmu(&tlb, mm, 0);
        if (new_end > old_start) {
                /*
                 * when the old and new regions overlap clear from new_end.
                 */
                free_pgd_range(&tlb, new_end, old_end, new_end,
                        vma->vm_next ? vma->vm_next->vm_start : 0);
        } else {
                /*
                 * otherwise, clean from old_start; this is done to avoid
                 * touching the address space in [new_end, old_start), since
                 * some architectures (e.g. IA64) have constraints on the
                 * va-space that make this illegal - for the others it's
                 * just a little faster.
                 */
                free_pgd_range(&tlb, old_start, old_end, new_end,
                        vma->vm_next ? vma->vm_next->vm_start : 0);
        }
        tlb_finish_mmu(&tlb, new_end, old_end);

        /*
         * Shrink the vma to just the new range.  Always succeeds.
         */
        vma_adjust(vma, new_start, new_end, vma->vm_pgoff, NULL);

        return 0;
}

/*
 * Finalizes the stack vm_area_struct. The flags and permissions are updated,
 * the stack is optionally relocated, and some extra space is added.
 */
int setup_arg_pages(struct linux_binprm *bprm,
                    unsigned long stack_top,
                    int executable_stack)
{
        unsigned long ret;
        unsigned long stack_shift;
        struct mm_struct *mm = current->mm;
        struct vm_area_struct *vma = bprm->vma;
        struct vm_area_struct *prev = NULL;
        unsigned long vm_flags;
        unsigned long stack_base;
        unsigned long stack_size;
        unsigned long stack_expand;
        unsigned long rlim_stack;

#ifdef CONFIG_STACK_GROWSUP
        /* Limit stack size to 1GB */
        stack_base = rlimit_max(RLIMIT_STACK);
        if (stack_base > (1 << 30))
                stack_base = 1 << 30;

        /* Make sure we didn't let the argument array grow too large. */
        if (vma->vm_end - vma->vm_start > stack_base)
                return -ENOMEM;

        stack_base = PAGE_ALIGN(stack_top - stack_base);

        stack_shift = vma->vm_start - stack_base;
        mm->arg_start = bprm->p - stack_shift;
        bprm->p = vma->vm_end - stack_shift;
#else
        stack_top = arch_align_stack(stack_top);
        stack_top = PAGE_ALIGN(stack_top);

        if (unlikely(stack_top < mmap_min_addr) ||
            unlikely(vma->vm_end - vma->vm_start >= stack_top - mmap_min_addr))
                return -ENOMEM;

        stack_shift = vma->vm_end - stack_top;

        bprm->p -= stack_shift;
        mm->arg_start = bprm->p;
#endif

        if (bprm->loader)
                bprm->loader -= stack_shift;
        bprm->exec -= stack_shift;

        down_write(&mm->mmap_sem);
        vm_flags = VM_STACK_FLAGS;

        /*
         * Adjust stack execute permissions; explicitly enable for
         * EXSTACK_ENABLE_X, disable for EXSTACK_DISABLE_X and leave alone
         * (arch default) otherwise.
         */
        if (unlikely(executable_stack == EXSTACK_ENABLE_X))
                vm_flags |= VM_EXEC;
        else if (executable_stack == EXSTACK_DISABLE_X)
                vm_flags &= ~VM_EXEC;
        vm_flags |= mm->def_flags;
        vm_flags |= VM_STACK_INCOMPLETE_SETUP;

        ret = mprotect_fixup(vma, &prev, vma->vm_start, vma->vm_end,
                        vm_flags);
        if (ret)
                goto out_unlock;
        BUG_ON(prev != vma);

        /* Move stack pages down in memory. */
        if (stack_shift) {
                ret = shift_arg_pages(vma, stack_shift);
                if (ret)
                        goto out_unlock;
        }

        /* mprotect_fixup is overkill to remove the temporary stack flags */
        vma->vm_flags &= ~VM_STACK_INCOMPLETE_SETUP;

        stack_expand = 131072UL; /* randomly 32*4k (or 2*64k) pages */
        stack_size = vma->vm_end - vma->vm_start;
        /*
         * Align this down to a page boundary as expand_stack
         * will align it up.
         */
        rlim_stack = rlimit(RLIMIT_STACK) & PAGE_MASK;
#ifdef CONFIG_STACK_GROWSUP
        if (stack_size + stack_expand > rlim_stack)
                stack_base = vma->vm_start + rlim_stack;
        else
                stack_base = vma->vm_end + stack_expand;
#else
        if (stack_size + stack_expand > rlim_stack)
                stack_base = vma->vm_end - rlim_stack;
        else
                stack_base = vma->vm_start - stack_expand;
#endif
        current->mm->start_stack = bprm->p;
        ret = expand_stack(vma, stack_base);
        if (ret)
                ret = -EFAULT;

out_unlock:
        up_write(&mm->mmap_sem);
        return ret;
}
EXPORT_SYMBOL(setup_arg_pages);

#endif /* CONFIG_MMU */

struct file *open_exec(const char *name)
{
        struct file *file;
        int err;
        struct filename tmp = { .name = name };
        static const struct open_flags open_exec_flags = {
                .open_flag = O_LARGEFILE | O_RDONLY | __FMODE_EXEC,
                .acc_mode = MAY_EXEC | MAY_OPEN,
                .intent = LOOKUP_OPEN
        };

        file = do_filp_open(AT_FDCWD, &tmp, &open_exec_flags, LOOKUP_FOLLOW);
        if (IS_ERR(file))
                goto out;

        err = -EACCES;
        if (!S_ISREG(file->f_path.dentry->d_inode->i_mode))
                goto exit;

        if (file->f_path.mnt->mnt_flags & MNT_NOEXEC)
                goto exit;

        fsnotify_open(file);

        err = deny_write_access(file);
        if (err)
                goto exit;

out:
        return file;

exit:
        fput(file);
        return ERR_PTR(err);
}
EXPORT_SYMBOL(open_exec);

int kernel_read(struct file *file, loff_t offset,
                char *addr, unsigned long count)
{
        mm_segment_t old_fs;
        loff_t pos = offset;
        int result;

        old_fs = get_fs();
        set_fs(get_ds());
        /* The cast to a user pointer is valid due to the set_fs() */
        result = vfs_read(file, (void __user *)addr, count, &pos);
        set_fs(old_fs);
        return result;
}

EXPORT_SYMBOL(kernel_read);
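
/*
 * Illustration (not part of this file; fenced off with #if 0):
 * prepare_binprm() below uses kernel_read() to pull the first
 * BINPRM_BUF_SIZE (128) bytes of the image for the format handlers to
 * sniff. The userspace equivalent is a plain pread() of the header;
 * "/bin/ls" is just an example path (sketch only):
 */
#if 0
#include <fcntl.h>
#include <stdio.h>
#include <unistd.h>

#define BINPRM_BUF_SIZE 128

int main(void)
{
        unsigned char buf[BINPRM_BUF_SIZE];
        int fd = open("/bin/ls", O_RDONLY);
        ssize_t n;

        if (fd < 0)
                return 1;
        n = pread(fd, buf, sizeof(buf), 0);     /* offset 0, like kernel_read */
        if (n >= 4 && buf[0] == 0x7f && buf[1] == 'E')
                printf("ELF image, read %zd header bytes\n", n);
        else if (n >= 2 && buf[0] == '#' && buf[1] == '!')
                printf("script image\n");
        close(fd);
        return 0;
}
#endif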

static int exec_mmap(struct mm_struct *mm)
{
        struct task_struct *tsk;
        struct mm_struct * old_mm, *active_mm;

        /* Notify parent that we're no longer interested in the old VM */
        tsk = current;
        old_mm = current->mm;
        mm_release(tsk, old_mm);

        if (old_mm) {
                sync_mm_rss(old_mm);
                /*
                 * Make sure that if there is a core dump in progress
                 * for the old mm, we get out and die instead of going
                 * through with the exec.  We must hold mmap_sem around
                 * checking core_state and changing tsk->mm.
                 */
                down_read(&old_mm->mmap_sem);
                if (unlikely(old_mm->core_state)) {
                        up_read(&old_mm->mmap_sem);
                        return -EINTR;
                }
        }
        task_lock(tsk);
        active_mm = tsk->active_mm;
        tsk->mm = mm;
        tsk->active_mm = mm;
        activate_mm(active_mm, mm);
        task_unlock(tsk);
        arch_pick_mmap_layout(mm);
        if (old_mm) {
                up_read(&old_mm->mmap_sem);
                BUG_ON(active_mm != old_mm);
                setmax_mm_hiwater_rss(&tsk->signal->maxrss, old_mm);
                mm_update_next_owner(old_mm);
                mmput(old_mm);
                return 0;
        }
        mmdrop(active_mm);
        return 0;
}

/*
 * This function makes sure the current process has its own signal table,
 * so that flush_signal_handlers can later reset the handlers without
 * disturbing other processes.  (Other processes might share the signal
 * table via the CLONE_SIGHAND option to clone().)
 */
static int de_thread(struct task_struct *tsk)
{
        struct signal_struct *sig = tsk->signal;
        struct sighand_struct *oldsighand = tsk->sighand;
        spinlock_t *lock = &oldsighand->siglock;

        if (thread_group_empty(tsk))
                goto no_thread_group;

        /*
         * Kill all other threads in the thread group.
         */
        spin_lock_irq(lock);
        if (signal_group_exit(sig)) {
                /*
                 * Another group action in progress, just
                 * return so that the signal is processed.
                 */
                spin_unlock_irq(lock);
                return -EAGAIN;
        }

        sig->group_exit_task = tsk;
        sig->notify_count = zap_other_threads(tsk);
        if (!thread_group_leader(tsk))
                sig->notify_count--;

        while (sig->notify_count) {
                __set_current_state(TASK_KILLABLE);
                spin_unlock_irq(lock);
                schedule();
                if (unlikely(__fatal_signal_pending(tsk)))
                        goto killed;
                spin_lock_irq(lock);
        }
        spin_unlock_irq(lock);

        /*
         * At this point all other threads have exited, all we have to
         * do is to wait for the thread group leader to become inactive,
         * and to assume its PID:
         */
        if (!thread_group_leader(tsk)) {
                struct task_struct *leader = tsk->group_leader;

                sig->notify_count = -1; /* for exit_notify() */
                for (;;) {
                        write_lock_irq(&tasklist_lock);
                        if (likely(leader->exit_state))
                                break;
                        __set_current_state(TASK_KILLABLE);
                        write_unlock_irq(&tasklist_lock);
                        schedule();
                        if (unlikely(__fatal_signal_pending(tsk)))
                                goto killed;
                }

                /*
                 * The only record we have of the real-time age of a
                 * process, regardless of execs it's done, is start_time.
                 * All the past CPU time is accumulated in signal_struct
                 * from sister threads now dead.  But in this non-leader
                 * exec, nothing survives from the original leader thread,
                 * whose birth marks the true age of this process now.
                 * When we take on its identity by switching to its PID, we
                 * also take its birthdate (always earlier than our own).
                 */
                tsk->start_time = leader->start_time;

                BUG_ON(!same_thread_group(leader, tsk));
                BUG_ON(has_group_leader_pid(tsk));
                /*
                 * An exec() starts a new thread group with the
                 * TGID of the previous thread group. Rehash the
                 * two threads with a switched PID, and release
                 * the former thread group leader:
                 */

                /* Become a process group leader with the old leader's pid.
                 * The old leader becomes a thread of this thread group.
                 * Note: The old leader also uses this pid until release_task
                 *       is called.  Odd but simple and correct.
                 */
                detach_pid(tsk, PIDTYPE_PID);
                tsk->pid = leader->pid;
                attach_pid(tsk, PIDTYPE_PID, task_pid(leader));
                transfer_pid(leader, tsk, PIDTYPE_PGID);
                transfer_pid(leader, tsk, PIDTYPE_SID);

                list_replace_rcu(&leader->tasks, &tsk->tasks);
                list_replace_init(&leader->sibling, &tsk->sibling);

                tsk->group_leader = tsk;
                leader->group_leader = tsk;

                tsk->exit_signal = SIGCHLD;
                leader->exit_signal = -1;

                BUG_ON(leader->exit_state != EXIT_ZOMBIE);
                leader->exit_state = EXIT_DEAD;

                /*
                 * We are going to release_task()->ptrace_unlink() silently;
                 * the tracer can sleep in do_wait(). EXIT_DEAD guarantees
                 * the tracer won't block again waiting for this thread.
                 */
                if (unlikely(leader->ptrace))
                        __wake_up_parent(leader, leader->parent);
                write_unlock_irq(&tasklist_lock);

                release_task(leader);
        }

        sig->group_exit_task = NULL;
        sig->notify_count = 0;

no_thread_group:
        /* we have changed execution domain */
        tsk->exit_signal = SIGCHLD;

        exit_itimers(sig);
        flush_itimer_signals();

        if (atomic_read(&oldsighand->count) != 1) {
                struct sighand_struct *newsighand;
                /*
                 * This ->sighand is shared with the CLONE_SIGHAND
                 * but not CLONE_THREAD task, switch to the new one.
                 */
                newsighand = kmem_cache_alloc(sighand_cachep, GFP_KERNEL);
                if (!newsighand)
                        return -ENOMEM;

                atomic_set(&newsighand->count, 1);
                memcpy(newsighand->action, oldsighand->action,
                       sizeof(newsighand->action));

                write_lock_irq(&tasklist_lock);
                spin_lock(&oldsighand->siglock);
                rcu_assign_pointer(tsk->sighand, newsighand);
                spin_unlock(&oldsighand->siglock);
                write_unlock_irq(&tasklist_lock);

                __cleanup_sighand(oldsighand);
        }

        BUG_ON(!thread_group_leader(tsk));
        return 0;

killed:
        /* protects against exit_notify() and __exit_signal() */
        read_lock(&tasklist_lock);
        sig->group_exit_task = NULL;
        sig->notify_count = 0;
        read_unlock(&tasklist_lock);
        return -EAGAIN;
}

char *get_task_comm(char *buf, struct task_struct *tsk)
{
        /* buf must be at least sizeof(tsk->comm) in size */
        task_lock(tsk);
        strncpy(buf, tsk->comm, sizeof(tsk->comm));
        task_unlock(tsk);
        return buf;
}
EXPORT_SYMBOL_GPL(get_task_comm);
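
/*
 * Illustration (not part of this file; fenced off with #if 0): task->comm
 * is visible to the process itself through prctl(); this is the userspace
 * counterpart of get_task_comm()/set_task_comm() below (sketch only):
 */
#if 0
#include <stdio.h>
#include <sys/prctl.h>

int main(void)
{
        char name[16];                          /* TASK_COMM_LEN bytes */

        prctl(PR_SET_NAME, (unsigned long)"demo", 0, 0, 0);
        prctl(PR_GET_NAME, (unsigned long)name, 0, 0, 0);
        printf("comm = %s\n", name);            /* prints "demo" */
        return 0;
}
#endif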

/*
 * These functions flush out all traces of the currently running executable
 * so that a new one can be started.
 */

void set_task_comm(struct task_struct *tsk, char *buf)
{
        task_lock(tsk);

        trace_task_rename(tsk, buf);

        /*
         * Threads may access current->comm without holding
         * the task lock, so write the string carefully.
         * Readers without a lock may see incomplete new
         * names but are safe from non-terminating string reads.
         */
        memset(tsk->comm, 0, TASK_COMM_LEN);
        wmb();
        strlcpy(tsk->comm, buf, sizeof(tsk->comm));
        task_unlock(tsk);
        perf_event_comm(tsk);
}

static void filename_to_taskname(char *tcomm, const char *fn, unsigned int len)
{
        int i, ch;

        /* Copies the binary name from after the last slash */
        for (i = 0; (ch = *(fn++)) != '\0';) {
                if (ch == '/')
                        i = 0; /* overwrite what we wrote */
                else
                        if (i < len - 1)
                                tcomm[i++] = ch;
        }
        tcomm[i] = '\0';
}
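
/*
 * Illustration (not part of this file; fenced off with #if 0): the loop
 * above keeps only the component after the last '/', truncated to fit the
 * buffer. A standalone copy with a quick self-check; the path and name
 * are made up (sketch only):
 */
#if 0
#include <assert.h>
#include <string.h>

static void basename_trunc(char *out, const char *fn, unsigned int len)
{
        unsigned int i = 0;
        int ch;

        while ((ch = *fn++) != '\0') {
                if (ch == '/')
                        i = 0;                  /* restart after each slash */
                else if (i < len - 1)
                        out[i++] = ch;
        }
        out[i] = '\0';
}

int main(void)
{
        char buf[16];                           /* TASK_COMM_LEN-sized */

        basename_trunc(buf, "/usr/bin/frobnicate-very-long", sizeof(buf));
        assert(strcmp(buf, "frobnicate-very") == 0);    /* 15 chars + NUL */
        return 0;
}
#endif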

int flush_old_exec(struct linux_binprm * bprm)
{
        int retval;

        /*
         * Make sure we have a private signal table and that
         * we are unassociated from the previous thread group.
         */
        retval = de_thread(current);
        if (retval)
                goto out;

        set_mm_exe_file(bprm->mm, bprm->file);

        filename_to_taskname(bprm->tcomm, bprm->filename, sizeof(bprm->tcomm));
        /*
         * Release all of the old mmap stuff
         */
        acct_arg_size(bprm, 0);
        retval = exec_mmap(bprm->mm);
        if (retval)
                goto out;

        bprm->mm = NULL;                /* We're using it now */

        set_fs(USER_DS);
        current->flags &=
                ~(PF_RANDOMIZE | PF_FORKNOEXEC | PF_KTHREAD | PF_NOFREEZE);
        flush_thread();
        current->personality &= ~bprm->per_clear;

        return 0;

out:
        return retval;
}
EXPORT_SYMBOL(flush_old_exec);

void would_dump(struct linux_binprm *bprm, struct file *file)
{
        if (inode_permission(file->f_path.dentry->d_inode, MAY_READ) < 0)
                bprm->interp_flags |= BINPRM_FLAGS_ENFORCE_NONDUMP;
}
EXPORT_SYMBOL(would_dump);

void setup_new_exec(struct linux_binprm * bprm)
{
        arch_pick_mmap_layout(current->mm);

        /* This is the point of no return */
        current->sas_ss_sp = current->sas_ss_size = 0;

        if (uid_eq(current_euid(), current_uid()) && gid_eq(current_egid(), current_gid()))
                set_dumpable(current->mm, SUID_DUMPABLE_ENABLED);
        else
                set_dumpable(current->mm, suid_dumpable);

        set_task_comm(current, bprm->tcomm);

        /* Set the new mm task size. We have to do that late because it may
         * depend on TIF_32BIT which is only updated in flush_thread() on
         * some architectures like powerpc
         */
        current->mm->task_size = TASK_SIZE;

        /* install the new credentials */
        if (!uid_eq(bprm->cred->uid, current_euid()) ||
            !gid_eq(bprm->cred->gid, current_egid())) {
                current->pdeath_signal = 0;
        } else {
                would_dump(bprm, bprm->file);
                if (bprm->interp_flags & BINPRM_FLAGS_ENFORCE_NONDUMP)
                        set_dumpable(current->mm, suid_dumpable);
        }

        /*
         * Flush performance counters when crossing a
         * security domain:
         */
        if (!get_dumpable(current->mm))
                perf_event_exit_task(current);

        /* An exec changes our domain. We are no longer part of the thread
           group */

        current->self_exec_id++;

        flush_signal_handlers(current, 0);
        do_close_on_exec(current->files);
}
EXPORT_SYMBOL(setup_new_exec);

/*
 * Prepare credentials and lock ->cred_guard_mutex.
 * install_exec_creds() commits the new creds and drops the lock.
 * Or, if exec fails before that, free_bprm() should release ->cred
 * and unlock.
 */
int prepare_bprm_creds(struct linux_binprm *bprm)
{
        if (mutex_lock_interruptible(&current->signal->cred_guard_mutex))
                return -ERESTARTNOINTR;

        bprm->cred = prepare_exec_creds();
        if (likely(bprm->cred))
                return 0;

        mutex_unlock(&current->signal->cred_guard_mutex);
        return -ENOMEM;
}

void free_bprm(struct linux_binprm *bprm)
{
        free_arg_pages(bprm);
        if (bprm->cred) {
                mutex_unlock(&current->signal->cred_guard_mutex);
                abort_creds(bprm->cred);
        }
        /* If a binfmt changed the interp, free it. */
        if (bprm->interp != bprm->filename)
                kfree(bprm->interp);
        kfree(bprm);
}

int bprm_change_interp(char *interp, struct linux_binprm *bprm)
{
        /* If a binfmt changed the interp, free it first. */
        if (bprm->interp != bprm->filename)
                kfree(bprm->interp);
        bprm->interp = kstrdup(interp, GFP_KERNEL);
        if (!bprm->interp)
                return -ENOMEM;
        return 0;
}
EXPORT_SYMBOL(bprm_change_interp);

/*
 * install the new credentials for this executable
 */
void install_exec_creds(struct linux_binprm *bprm)
{
        security_bprm_committing_creds(bprm);

        commit_creds(bprm->cred);
        bprm->cred = NULL;
        /*
         * cred_guard_mutex must be held at least to this point to prevent
         * ptrace_attach() from altering our determination of the task's
         * credentials; any time after this it may be unlocked.
         */
        security_bprm_committed_creds(bprm);
        mutex_unlock(&current->signal->cred_guard_mutex);
}
EXPORT_SYMBOL(install_exec_creds);

/*
 * determine how safe it is to execute the proposed program
 * - the caller must hold ->cred_guard_mutex to protect against
 *   PTRACE_ATTACH
 */
static int check_unsafe_exec(struct linux_binprm *bprm)
{
        struct task_struct *p = current, *t;
        unsigned n_fs;
        int res = 0;

        if (p->ptrace) {
                if (p->ptrace & PT_PTRACE_CAP)
                        bprm->unsafe |= LSM_UNSAFE_PTRACE_CAP;
                else
                        bprm->unsafe |= LSM_UNSAFE_PTRACE;
        }

        /*
         * This isn't strictly necessary, but it makes it harder for LSMs to
         * mess up.
         */
        if (current->no_new_privs)
                bprm->unsafe |= LSM_UNSAFE_NO_NEW_PRIVS;

        n_fs = 1;
        spin_lock(&p->fs->lock);
        rcu_read_lock();
        for (t = next_thread(p); t != p; t = next_thread(t)) {
                if (t->fs == p->fs)
                        n_fs++;
        }
        rcu_read_unlock();

        if (p->fs->users > n_fs) {
                bprm->unsafe |= LSM_UNSAFE_SHARE;
        } else {
                res = -EAGAIN;
                if (!p->fs->in_exec) {
                        p->fs->in_exec = 1;
                        res = 1;
                }
        }
        spin_unlock(&p->fs->lock);

        return res;
}

/*
 * Fill the binprm structure from the inode.
 * Check permissions, then read the first 128 (BINPRM_BUF_SIZE) bytes.
 *
 * This may be called multiple times for binary chains (scripts for example).
 */
int prepare_binprm(struct linux_binprm *bprm)
{
        umode_t mode;
        struct inode * inode = bprm->file->f_path.dentry->d_inode;
        int retval;

        mode = inode->i_mode;
        if (bprm->file->f_op == NULL)
                return -EACCES;

        /* clear any previous set[ug]id data from a previous binary */
        bprm->cred->euid = current_euid();
        bprm->cred->egid = current_egid();

        if (!(bprm->file->f_path.mnt->mnt_flags & MNT_NOSUID) &&
            !current->no_new_privs &&
            kuid_has_mapping(bprm->cred->user_ns, inode->i_uid) &&
            kgid_has_mapping(bprm->cred->user_ns, inode->i_gid)) {
                /* Set-uid? */
                if (mode & S_ISUID) {
                        bprm->per_clear |= PER_CLEAR_ON_SETID;
                        bprm->cred->euid = inode->i_uid;
                }

                /* Set-gid? */
                /*
                 * If setgid is set but no group execute bit then this
                 * is a candidate for mandatory locking, not a setgid
                 * executable.
                 */
                if ((mode & (S_ISGID | S_IXGRP)) == (S_ISGID | S_IXGRP)) {
                        bprm->per_clear |= PER_CLEAR_ON_SETID;
                        bprm->cred->egid = inode->i_gid;
                }
        }

        /* fill in binprm security blob */
        retval = security_bprm_set_creds(bprm);
        if (retval)
                return retval;
        bprm->cred_prepared = 1;

        memset(bprm->buf, 0, BINPRM_BUF_SIZE);
        return kernel_read(bprm->file, 0, bprm->buf, BINPRM_BUF_SIZE);
}

EXPORT_SYMBOL(prepare_binprm);
/*
 * Arguments are '\0' separated strings found at the location bprm->p
 * points to; chop off the first by relocating bprm->p to right after
 * the first '\0' encountered.
 */
int remove_arg_zero(struct linux_binprm *bprm)
{
        int ret = 0;
        unsigned long offset;
        char *kaddr;
        struct page *page;

        if (!bprm->argc)
                return 0;

        do {
                offset = bprm->p & ~PAGE_MASK;
                page = get_arg_page(bprm, bprm->p, 0);
                if (!page) {
                        ret = -EFAULT;
                        goto out;
                }
                kaddr = kmap_atomic(page);

                for (; offset < PAGE_SIZE && kaddr[offset];
                                offset++, bprm->p++)
                        ;

                kunmap_atomic(kaddr);
                put_arg_page(page);

                if (offset == PAGE_SIZE)
                        free_arg_page(bprm, (bprm->p >> PAGE_SHIFT) - 1);
        } while (offset == PAGE_SIZE);

        bprm->p++;
        bprm->argc--;
        ret = 0;

out:
        return ret;
}
EXPORT_SYMBOL(remove_arg_zero);
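
/*
 * Illustration (not part of this file; fenced off with #if 0): the
 * argument area is a run of '\0'-terminated strings, so dropping argv[0]
 * amounts to advancing the start pointer past the first NUL, which is
 * what remove_arg_zero() does page by page. A flat-buffer sketch:
 */
#if 0
#include <stdio.h>
#include <string.h>

int main(void)
{
        char area[] = "interp\0script.sh\0-x";  /* packed like the bprm->p area */
        const char *p = area;

        p += strlen(p) + 1;             /* skip argv[0] past its '\0' */
        printf("next arg: %s\n", p);    /* prints "script.sh" */
        return 0;
}
#endif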

/*
 * Cycle through the list of binary format handlers until one recognizes
 * the image.
 */
int search_binary_handler(struct linux_binprm *bprm)
{
        unsigned int depth = bprm->recursion_depth;
        int try, retval;
        struct linux_binfmt *fmt;
        pid_t old_pid, old_vpid;

        /* This allows up to 5 levels of binfmt rewrites before failing hard. */
        if (depth > 5)
                return -ELOOP;

        retval = security_bprm_check(bprm);
        if (retval)
                return retval;

        retval = audit_bprm(bprm);
        if (retval)
                return retval;

        /* Need to fetch pid before load_binary changes it */
        old_pid = current->pid;
        rcu_read_lock();
        old_vpid = task_pid_nr_ns(current, task_active_pid_ns(current->parent));
        rcu_read_unlock();

        retval = -ENOENT;
        for (try = 0; try < 2; try++) {
                read_lock(&binfmt_lock);
                list_for_each_entry(fmt, &formats, lh) {
                        int (*fn)(struct linux_binprm *) = fmt->load_binary;
                        if (!fn)
                                continue;
                        if (!try_module_get(fmt->module))
                                continue;
                        read_unlock(&binfmt_lock);
                        bprm->recursion_depth = depth + 1;
                        retval = fn(bprm);
                        bprm->recursion_depth = depth;
                        if (retval >= 0) {
                                if (depth == 0) {
                                        trace_sched_process_exec(current, old_pid, bprm);
                                        ptrace_event(PTRACE_EVENT_EXEC, old_vpid);
                                }
                                put_binfmt(fmt);
                                allow_write_access(bprm->file);
                                if (bprm->file)
                                        fput(bprm->file);
                                bprm->file = NULL;
                                current->did_exec = 1;
                                proc_exec_connector(current);
                                return retval;
                        }
                        read_lock(&binfmt_lock);
                        put_binfmt(fmt);
                        if (retval != -ENOEXEC || bprm->mm == NULL)
                                break;
                        if (!bprm->file) {
                                read_unlock(&binfmt_lock);
                                return retval;
                        }
                }
                read_unlock(&binfmt_lock);
#ifdef CONFIG_MODULES
                if (retval != -ENOEXEC || bprm->mm == NULL) {
                        break;
                } else {
#define printable(c) (((c)=='\t') || ((c)=='\n') || (0x20<=(c) && (c)<=0x7e))
                        if (printable(bprm->buf[0]) &&
                            printable(bprm->buf[1]) &&
                            printable(bprm->buf[2]) &&
                            printable(bprm->buf[3]))
                                break; /* -ENOEXEC */
                        if (try)
                                break; /* -ENOEXEC */
                        request_module("binfmt-%04x", *(unsigned short *)(&bprm->buf[2]));
                }
#else
                break;
#endif
        }
        return retval;
}

EXPORT_SYMBOL(search_binary_handler);
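
/*
 * Illustration (not part of this file; fenced off with #if 0): the loop
 * above tries each registered handler until one stops returning -ENOEXEC.
 * The dispatch pattern in miniature, with two fake handlers and
 * errno-style returns; the magic checks are simplified (sketch only):
 */
#if 0
#include <errno.h>
#include <stdio.h>
#include <string.h>

struct handler {
        const char *name;
        int (*load)(const unsigned char *buf);
};

static int load_elf(const unsigned char *buf)
{
        return memcmp(buf, "\177ELF", 4) == 0 ? 0 : -ENOEXEC;
}

static int load_script(const unsigned char *buf)
{
        return buf[0] == '#' && buf[1] == '!' ? 0 : -ENOEXEC;
}

static const struct handler formats[] = {
        { "elf", load_elf }, { "script", load_script },
};

int main(void)
{
        const unsigned char image[] = "#!/bin/sh\n";
        int i, ret = -ENOEXEC;

        for (i = 0; i < 2 && ret == -ENOEXEC; i++)
                ret = formats[i].load(image);   /* keep trying, as above */
        printf("ret=%d (matched %s)\n", ret,
               ret ? "nothing" : formats[i - 1].name);
        return 0;
}
#endif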

/*
 * sys_execve() executes a new program.
 */
static int do_execve_common(const char *filename,
                                struct user_arg_ptr argv,
                                struct user_arg_ptr envp)
{
        struct linux_binprm *bprm;
        struct file *file;
        struct files_struct *displaced;
        bool clear_in_exec;
        int retval;
        const struct cred *cred = current_cred();

        /*
         * We move the actual failure in case of RLIMIT_NPROC excess from
         * set*uid() to execve() because too many poorly written programs
         * don't check setuid() return code.  Here we additionally recheck
         * whether NPROC limit is still exceeded.
         */
        if ((current->flags & PF_NPROC_EXCEEDED) &&
            atomic_read(&cred->user->processes) > rlimit(RLIMIT_NPROC)) {
                retval = -EAGAIN;
                goto out_ret;
        }

        /* We're below the limit (still or again), so we don't want to make
         * further execve() calls fail. */
        current->flags &= ~PF_NPROC_EXCEEDED;

        retval = unshare_files(&displaced);
        if (retval)
                goto out_ret;

        retval = -ENOMEM;
        bprm = kzalloc(sizeof(*bprm), GFP_KERNEL);
        if (!bprm)
                goto out_files;

        retval = prepare_bprm_creds(bprm);
        if (retval)
                goto out_free;

        retval = check_unsafe_exec(bprm);
        if (retval < 0)
                goto out_free;
        clear_in_exec = retval;
        current->in_execve = 1;

        file = open_exec(filename);
        retval = PTR_ERR(file);
        if (IS_ERR(file))
                goto out_unmark;

        sched_exec();

        bprm->file = file;
        bprm->filename = filename;
        bprm->interp = filename;

        retval = bprm_mm_init(bprm);
        if (retval)
                goto out_file;

        bprm->argc = count(argv, MAX_ARG_STRINGS);
        if ((retval = bprm->argc) < 0)
                goto out;

        bprm->envc = count(envp, MAX_ARG_STRINGS);
        if ((retval = bprm->envc) < 0)
                goto out;

        retval = prepare_binprm(bprm);
        if (retval < 0)
                goto out;

        retval = copy_strings_kernel(1, &bprm->filename, bprm);
        if (retval < 0)
                goto out;

        bprm->exec = bprm->p;
        retval = copy_strings(bprm->envc, envp, bprm);
        if (retval < 0)
                goto out;

        retval = copy_strings(bprm->argc, argv, bprm);
        if (retval < 0)
                goto out;

        retval = search_binary_handler(bprm);
        if (retval < 0)
                goto out;

        /* execve succeeded */
        current->fs->in_exec = 0;
        current->in_execve = 0;
        acct_update_integrals(current);
        free_bprm(bprm);
        if (displaced)
                put_files_struct(displaced);
        return retval;

out:
        if (bprm->mm) {
                acct_arg_size(bprm, 0);
                mmput(bprm->mm);
        }

out_file:
        if (bprm->file) {
                allow_write_access(bprm->file);
                fput(bprm->file);
        }

out_unmark:
        if (clear_in_exec)
                current->fs->in_exec = 0;
        current->in_execve = 0;

out_free:
        free_bprm(bprm);

out_files:
        if (displaced)
                reset_files_struct(displaced);
out_ret:
        return retval;
}

int do_execve(const char *filename,
        const char __user *const __user *__argv,
        const char __user *const __user *__envp)
{
        struct user_arg_ptr argv = { .ptr.native = __argv };
        struct user_arg_ptr envp = { .ptr.native = __envp };
        return do_execve_common(filename, argv, envp);
}

#ifdef CONFIG_COMPAT
static int compat_do_execve(const char *filename,
        const compat_uptr_t __user *__argv,
        const compat_uptr_t __user *__envp)
{
        struct user_arg_ptr argv = {
                .is_compat = true,
                .ptr.compat = __argv,
        };
        struct user_arg_ptr envp = {
                .is_compat = true,
                .ptr.compat = __envp,
        };
        return do_execve_common(filename, argv, envp);
}
#endif

void set_binfmt(struct linux_binfmt *new)
{
        struct mm_struct *mm = current->mm;

        if (mm->binfmt)
                module_put(mm->binfmt->module);

        mm->binfmt = new;
        if (new)
                __module_get(new->module);
}

EXPORT_SYMBOL(set_binfmt);

/*
 * set_dumpable converts the traditional three-value dumpable setting to two
 * flags and stores them into mm->flags.  It modifies the lower two bits of
 * mm->flags, but these bits are not changed atomically, so get_dumpable can
 * observe an intermediate state.  To avoid unexpected behavior, get_dumpable
 * returns either the old or the new value by paying attention to the order
 * in which the bits are modified.
 *
 * dumpable |   mm->flags (binary)
 * old  new | initial interim  final
 * ---------+-----------------------
 *  0    1  |   00      01      01
 *  0    2  |   00      10(*)   11
 *  1    0  |   01      00      00
 *  1    2  |   01      11      11
 *  2    0  |   11      10(*)   00
 *  2    1  |   11      11      01
 *
 * (*) get_dumpable regards the interim value of 10 as 11.
 */
void set_dumpable(struct mm_struct *mm, int value)
{
        switch (value) {
        case SUID_DUMPABLE_DISABLED:
                clear_bit(MMF_DUMPABLE, &mm->flags);
                smp_wmb();
                clear_bit(MMF_DUMP_SECURELY, &mm->flags);
                break;
        case SUID_DUMPABLE_ENABLED:
                set_bit(MMF_DUMPABLE, &mm->flags);
                smp_wmb();
                clear_bit(MMF_DUMP_SECURELY, &mm->flags);
                break;
        case SUID_DUMPABLE_SAFE:
                set_bit(MMF_DUMP_SECURELY, &mm->flags);
                smp_wmb();
                set_bit(MMF_DUMPABLE, &mm->flags);
                break;
        }
}

int __get_dumpable(unsigned long mm_flags)
{
        int ret;

        ret = mm_flags & MMF_DUMPABLE_MASK;
        return (ret > SUID_DUMPABLE_ENABLED) ? SUID_DUMPABLE_SAFE : ret;
}

int get_dumpable(struct mm_struct *mm)
{
        return __get_dumpable(mm->flags);
}
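
/*
 * Illustration (not part of this file; fenced off with #if 0): the
 * two-bit encoding above as a standalone function reproducing the table,
 * including the rule that the interim value 10 reads back as
 * SUID_DUMPABLE_SAFE. The mask values are assumed to occupy bits 0 and 1
 * (sketch only):
 */
#if 0
#include <assert.h>

#define MMF_DUMPABLE_BIT        0x1     /* mask for bit 0 */
#define MMF_DUMP_SECURELY_BIT   0x2     /* mask for bit 1 */

static int get_dumpable_bits(unsigned long flags)
{
        int ret = flags & (MMF_DUMPABLE_BIT | MMF_DUMP_SECURELY_BIT);

        return ret > 1 ? 2 : ret;       /* 10 and 11 both read as "safe" */
}

int main(void)
{
        assert(get_dumpable_bits(0) == 0);      /* disabled */
        assert(get_dumpable_bits(1) == 1);      /* enabled */
        assert(get_dumpable_bits(2) == 2);      /* interim 10 -> safe */
        assert(get_dumpable_bits(3) == 2);      /* final 11 -> safe */
        return 0;
}
#endif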

SYSCALL_DEFINE3(execve,
                const char __user *, filename,
                const char __user *const __user *, argv,
                const char __user *const __user *, envp)
{
        struct filename *path = getname(filename);
        int error = PTR_ERR(path);
        if (!IS_ERR(path)) {
                error = do_execve(path->name, argv, envp);
                putname(path);
        }
        return error;
}
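
/*
 * Illustration (not part of this file; fenced off with #if 0): the
 * syscall defined above as seen from userspace. On success execve() never
 * returns; the argv/envp contents are just examples (sketch only):
 */
#if 0
#include <stdio.h>
#include <unistd.h>

int main(void)
{
        char *const argv[] = { "ls", "-l", "/", NULL };
        char *const envp[] = { "PATH=/bin:/usr/bin", NULL };

        execve("/bin/ls", argv, envp);  /* replaces this process image */
        perror("execve");               /* reached only on failure */
        return 1;
}
#endif
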
#ifdef CONFIG_COMPAT
asmlinkage long compat_sys_execve(const char __user * filename,
        const compat_uptr_t __user * argv,
        const compat_uptr_t __user * envp)
{
        struct filename *path = getname(filename);
        int error = PTR_ERR(path);
        if (!IS_ERR(path)) {
                error = compat_do_execve(path->name, argv, envp);
                putname(path);
        }
        return error;
}
#endif
