FreeBSD/Linux Kernel Cross Reference
sys/kern/kern_exec.c

    1 /*-
    2  * Copyright (c) 1993, David Greenman
    3  * All rights reserved.
    4  *
    5  * Redistribution and use in source and binary forms, with or without
    6  * modification, are permitted provided that the following conditions
    7  * are met:
    8  * 1. Redistributions of source code must retain the above copyright
    9  *    notice, this list of conditions and the following disclaimer.
   10  * 2. Redistributions in binary form must reproduce the above copyright
   11  *    notice, this list of conditions and the following disclaimer in the
   12  *    documentation and/or other materials provided with the distribution.
   13  *
   14  * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
   15  * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
   16  * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
   17  * ARE DISCLAIMED.  IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
   18  * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
   19  * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
   20  * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
   21  * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
   22  * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
   23  * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
   24  * SUCH DAMAGE.
   25  */
   26 
   27 #include <sys/cdefs.h>
   28 __FBSDID("$FreeBSD$");
   29 
   30 #include "opt_capsicum.h"
   31 #include "opt_hwpmc_hooks.h"
   32 #include "opt_kdtrace.h"
   33 #include "opt_ktrace.h"
   34 #include "opt_vm.h"
   35 
   36 #include <sys/param.h>
   37 #include <sys/capability.h>
   38 #include <sys/systm.h>
   40 #include <sys/eventhandler.h>
   41 #include <sys/lock.h>
   42 #include <sys/mutex.h>
   43 #include <sys/sysproto.h>
   44 #include <sys/signalvar.h>
   45 #include <sys/kernel.h>
   46 #include <sys/mount.h>
   47 #include <sys/filedesc.h>
   48 #include <sys/fcntl.h>
   49 #include <sys/acct.h>
   50 #include <sys/exec.h>
   51 #include <sys/imgact.h>
   52 #include <sys/imgact_elf.h>
   53 #include <sys/wait.h>
   54 #include <sys/malloc.h>
   55 #include <sys/priv.h>
   56 #include <sys/proc.h>
   57 #include <sys/pioctl.h>
   58 #include <sys/namei.h>
   59 #include <sys/resourcevar.h>
   60 #include <sys/sched.h>
   61 #include <sys/sdt.h>
   62 #include <sys/sf_buf.h>
   63 #include <sys/syscallsubr.h>
   64 #include <sys/sysent.h>
   65 #include <sys/shm.h>
   66 #include <sys/sysctl.h>
   67 #include <sys/vnode.h>
   68 #include <sys/stat.h>
   69 #ifdef KTRACE
   70 #include <sys/ktrace.h>
   71 #endif
   72 
   73 #include <vm/vm.h>
   74 #include <vm/vm_param.h>
   75 #include <vm/pmap.h>
   76 #include <vm/vm_page.h>
   77 #include <vm/vm_map.h>
   78 #include <vm/vm_kern.h>
   79 #include <vm/vm_extern.h>
   80 #include <vm/vm_object.h>
   81 #include <vm/vm_pager.h>
   82 
   83 #ifdef  HWPMC_HOOKS
   84 #include <sys/pmckern.h>
   85 #endif
   86 
   87 #include <machine/reg.h>
   88 
   89 #include <security/audit/audit.h>
   90 #include <security/mac/mac_framework.h>
   91 
   92 #ifdef KDTRACE_HOOKS
   93 #include <sys/dtrace_bsd.h>
   94 dtrace_execexit_func_t  dtrace_fasttrap_exec;
   95 #endif
   96 
   97 SDT_PROVIDER_DECLARE(proc);
   98 SDT_PROBE_DEFINE1(proc, kernel, , exec, "char *");
   99 SDT_PROBE_DEFINE1(proc, kernel, , exec__failure, "int");
  100 SDT_PROBE_DEFINE1(proc, kernel, , exec__success, "char *");
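       /*
        * Editorial note: these statically-defined tracing probes surface
        * through the DTrace "proc" provider.  A hedged sketch of watching
        * them from userland (exact probe spelling and argument handling may
        * differ between releases):
        *
        *	# dtrace -n 'proc:::exec { trace(stringof(arg0)); }'
        *	# dtrace -n 'proc:::exec-failure { trace(arg0); }'
        */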
  101 
  102 MALLOC_DEFINE(M_PARGS, "proc-args", "Process arguments");
  103 
  104 static int sysctl_kern_ps_strings(SYSCTL_HANDLER_ARGS);
  105 static int sysctl_kern_usrstack(SYSCTL_HANDLER_ARGS);
  106 static int sysctl_kern_stackprot(SYSCTL_HANDLER_ARGS);
  107 static int do_execve(struct thread *td, struct image_args *args,
  108     struct mac *mac_p);
  109 
  110 /* XXX This should be vm_size_t. */
  111 SYSCTL_PROC(_kern, KERN_PS_STRINGS, ps_strings, CTLTYPE_ULONG|CTLFLAG_RD,
  112     NULL, 0, sysctl_kern_ps_strings, "LU", "");
  113 
  114 /* XXX This should be vm_size_t. */
  115 SYSCTL_PROC(_kern, KERN_USRSTACK, usrstack, CTLTYPE_ULONG|CTLFLAG_RD|
  116     CTLFLAG_CAPRD, NULL, 0, sysctl_kern_usrstack, "LU", "");
  117 
  118 SYSCTL_PROC(_kern, OID_AUTO, stackprot, CTLTYPE_INT|CTLFLAG_RD,
  119     NULL, 0, sysctl_kern_stackprot, "I", "");
  120 
  121 u_long ps_arg_cache_limit = PAGE_SIZE / 16;
  122 SYSCTL_ULONG(_kern, OID_AUTO, ps_arg_cache_limit, CTLFLAG_RW, 
  123     &ps_arg_cache_limit, 0, "");
  124 
  125 static int disallow_high_osrel;
  126 SYSCTL_INT(_kern, OID_AUTO, disallow_high_osrel, CTLFLAG_RW,
  127     &disallow_high_osrel, 0,
  128     "Disallow execution of binaries built for higher version of the world");
  129 
  130 static int map_at_zero = 0;
  131 TUNABLE_INT("security.bsd.map_at_zero", &map_at_zero);
  132 SYSCTL_INT(_security_bsd, OID_AUTO, map_at_zero, CTLFLAG_RW, &map_at_zero, 0,
  133     "Permit processes to map an object at virtual address 0.");
  134 
  135 static int
  136 sysctl_kern_ps_strings(SYSCTL_HANDLER_ARGS)
  137 {
  138         struct proc *p;
  139         int error;
  140 
  141         p = curproc;
  142 #ifdef SCTL_MASK32
  143         if (req->flags & SCTL_MASK32) {
  144                 unsigned int val;
  145                 val = (unsigned int)p->p_sysent->sv_psstrings;
  146                 error = SYSCTL_OUT(req, &val, sizeof(val));
  147         } else
  148 #endif
  149                 error = SYSCTL_OUT(req, &p->p_sysent->sv_psstrings,
  150                    sizeof(p->p_sysent->sv_psstrings));
  151         return error;
  152 }
  153 
  154 static int
  155 sysctl_kern_usrstack(SYSCTL_HANDLER_ARGS)
  156 {
  157         struct proc *p;
  158         int error;
  159 
  160         p = curproc;
  161 #ifdef SCTL_MASK32
  162         if (req->flags & SCTL_MASK32) {
  163                 unsigned int val;
  164                 val = (unsigned int)p->p_sysent->sv_usrstack;
  165                 error = SYSCTL_OUT(req, &val, sizeof(val));
  166         } else
  167 #endif
  168                 error = SYSCTL_OUT(req, &p->p_sysent->sv_usrstack,
  169                     sizeof(p->p_sysent->sv_usrstack));
  170         return error;
  171 }
  172 
  173 static int
  174 sysctl_kern_stackprot(SYSCTL_HANDLER_ARGS)
  175 {
  176         struct proc *p;
  177 
  178         p = curproc;
  179         return (SYSCTL_OUT(req, &p->p_sysent->sv_stackprot,
  180             sizeof(p->p_sysent->sv_stackprot)));
  181 }
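       /*
        * Editorial note: the three handlers above back the read-only sysctls
        * kern.ps_strings, kern.usrstack and kern.stackprot.  A minimal
        * userland sketch of reading one of them with sysctlbyname(3)
        * (headers: <sys/types.h>, <sys/sysctl.h>, <err.h>):
        *
        *	u_long usrstack;
        *	size_t len = sizeof(usrstack);
        *
        *	if (sysctlbyname("kern.usrstack", &usrstack, &len, NULL, 0) == -1)
        *		err(1, "sysctlbyname");
        */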
  182 
  183 /*
  184  * Each of the items is a pointer to a `const struct execsw', hence the
  185  * double pointer here.
  186  */
  187 static const struct execsw **execsw;
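       /*
        * Editorial note: image activators register themselves through
        * exec_register() below, normally via the EXEC_SET() macro.  A sketch
        * along the lines of imgact_shell.c:
        *
        *	static struct execsw shell_execsw = { exec_shell_imgact, "#!" };
        *	EXEC_SET(shell, shell_execsw);
        */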
  188 
  189 #ifndef _SYS_SYSPROTO_H_
  190 struct execve_args {
  191         char    *fname; 
  192         char    **argv;
  193         char    **envv; 
  194 };
  195 #endif
  196 
  197 int
  198 sys_execve(td, uap)
  199         struct thread *td;
  200         struct execve_args /* {
  201                 char *fname;
  202                 char **argv;
  203                 char **envv;
  204         } */ *uap;
  205 {
  206         int error;
  207         struct image_args args;
  208 
  209         error = exec_copyin_args(&args, uap->fname, UIO_USERSPACE,
  210             uap->argv, uap->envv);
  211         if (error == 0)
  212                 error = kern_execve(td, &args, NULL);
  213         return (error);
  214 }
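       /*
        * Editorial note: an illustrative userland view of the entry point
        * above (not part of this file; headers: <unistd.h>, <err.h>).
        * execve(2) replaces the current image on success, so the err() call
        * is reached only on failure:
        *
        *	char *argv[] = { "ls", "-l", NULL };
        *	char *envp[] = { "PATH=/bin:/usr/bin", NULL };
        *
        *	execve("/bin/ls", argv, envp);
        *	err(1, "execve");
        */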
  215 
  216 #ifndef _SYS_SYSPROTO_H_
  217 struct fexecve_args {
  218         int     fd;
  219         char    **argv;
  220         char    **envv;
   221 };
  222 #endif
  223 int
  224 sys_fexecve(struct thread *td, struct fexecve_args *uap)
  225 {
  226         int error;
  227         struct image_args args;
  228 
  229         error = exec_copyin_args(&args, NULL, UIO_SYSSPACE,
  230             uap->argv, uap->envv);
  231         if (error == 0) {
  232                 args.fd = uap->fd;
  233                 error = kern_execve(td, &args, NULL);
  234         }
  235         return (error);
  236 }
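       /*
        * Editorial note: an illustrative userland use of fexecve(2) (not
        * part of this file; headers: <fcntl.h>, <unistd.h>, <err.h>, with
        * argv/envp as in the sketch above).  The descriptor may be opened
        * with O_EXEC, which satisfies the fgetvp_exec()/CAP_FEXECVE check
        * in do_execve() below:
        *
        *	int fd = open("/bin/ls", O_EXEC);
        *
        *	if (fd == -1)
        *		err(1, "open");
        *	fexecve(fd, argv, envp);
        *	err(1, "fexecve");
        */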
  237 
  238 #ifndef _SYS_SYSPROTO_H_
  239 struct __mac_execve_args {
  240         char    *fname;
  241         char    **argv;
  242         char    **envv;
  243         struct mac      *mac_p;
  244 };
  245 #endif
  246 
  247 int
  248 sys___mac_execve(td, uap)
  249         struct thread *td;
  250         struct __mac_execve_args /* {
  251                 char *fname;
  252                 char **argv;
  253                 char **envv;
  254                 struct mac *mac_p;
  255         } */ *uap;
  256 {
  257 #ifdef MAC
  258         int error;
  259         struct image_args args;
  260 
  261         error = exec_copyin_args(&args, uap->fname, UIO_USERSPACE,
  262             uap->argv, uap->envv);
  263         if (error == 0)
  264                 error = kern_execve(td, &args, uap->mac_p);
  265         return (error);
  266 #else
  267         return (ENOSYS);
  268 #endif
  269 }
  270 
  271 /*
  272  * XXX: kern_execve has the astonishing property of not always returning to
  273  * the caller.  If sufficiently bad things happen during the call to
  274  * do_execve(), it can end up calling exit1(); as a result, callers must
  275  * avoid doing anything which they might need to undo (e.g., allocating
  276  * memory).
  277  */
  278 int
  279 kern_execve(td, args, mac_p)
  280         struct thread *td;
  281         struct image_args *args;
  282         struct mac *mac_p;
  283 {
  284         struct proc *p = td->td_proc;
  285         struct vmspace *oldvmspace;
  286         int error;
  287 
  288         AUDIT_ARG_ARGV(args->begin_argv, args->argc,
  289             args->begin_envv - args->begin_argv);
  290         AUDIT_ARG_ENVV(args->begin_envv, args->envc,
  291             args->endp - args->begin_envv);
  292         if (p->p_flag & P_HADTHREADS) {
  293                 PROC_LOCK(p);
  294                 if (thread_single(SINGLE_BOUNDARY)) {
  295                         PROC_UNLOCK(p);
  296                         exec_free_args(args);
  297                         return (ERESTART);      /* Try again later. */
  298                 }
  299                 PROC_UNLOCK(p);
  300         }
  301 
  302         KASSERT((td->td_pflags & TDP_EXECVMSPC) == 0, ("nested execve"));
  303         oldvmspace = td->td_proc->p_vmspace;
  304         error = do_execve(td, args, mac_p);
  305 
  306         if (p->p_flag & P_HADTHREADS) {
  307                 PROC_LOCK(p);
  308                 /*
   309                  * On success, upgrade to the SINGLE_EXIT state to
   310                  * force the other threads to exit.
  311                  */
  312                 if (error == 0)
  313                         thread_single(SINGLE_EXIT);
  314                 else
  315                         thread_single_end();
  316                 PROC_UNLOCK(p);
  317         }
  318         if ((td->td_pflags & TDP_EXECVMSPC) != 0) {
  319                 KASSERT(td->td_proc->p_vmspace != oldvmspace,
  320                     ("oldvmspace still used"));
  321                 vmspace_free(oldvmspace);
  322                 td->td_pflags &= ~TDP_EXECVMSPC;
  323         }
  324 
  325         return (error);
  326 }
  327 
  328 /*
  329  * In-kernel implementation of execve().  All arguments are assumed to be
  330  * userspace pointers from the passed thread.
  331  */
  332 static int
  333 do_execve(td, args, mac_p)
  334         struct thread *td;
  335         struct image_args *args;
  336         struct mac *mac_p;
  337 {
  338         struct proc *p = td->td_proc;
  339         struct nameidata nd;
  340         struct ucred *newcred = NULL, *oldcred;
  341         struct uidinfo *euip;
  342         register_t *stack_base;
  343         int error, i;
  344         struct image_params image_params, *imgp;
  345         struct vattr attr;
  346         int (*img_first)(struct image_params *);
  347         struct pargs *oldargs = NULL, *newargs = NULL;
  348         struct sigacts *oldsigacts, *newsigacts;
  349 #ifdef KTRACE
  350         struct vnode *tracevp = NULL;
  351         struct ucred *tracecred = NULL;
  352 #endif
  353         struct vnode *textvp = NULL, *binvp = NULL;
  354         int credential_changing;
  355         int vfslocked;
  356         int textset;
  357 #ifdef MAC
  358         struct label *interpvplabel = NULL;
  359         int will_transition;
  360 #endif
  361 #ifdef HWPMC_HOOKS
  362         struct pmckern_procexec pe;
  363 #endif
  364         static const char fexecv_proc_title[] = "(fexecv)";
  365 
  366         vfslocked = 0;
  367         imgp = &image_params;
  368 
  369         /*
  370          * Lock the process and set the P_INEXEC flag to indicate that
  371          * it should be left alone until we're done here.  This is
  372          * necessary to avoid race conditions - e.g. in ptrace() -
  373          * that might allow a local user to illicitly obtain elevated
  374          * privileges.
  375          */
  376         PROC_LOCK(p);
  377         KASSERT((p->p_flag & P_INEXEC) == 0,
  378             ("%s(): process already has P_INEXEC flag", __func__));
  379         p->p_flag |= P_INEXEC;
  380         PROC_UNLOCK(p);
  381 
  382         /*
  383          * Initialize part of the common data
  384          */
  385         imgp->proc = p;
  386         imgp->execlabel = NULL;
  387         imgp->attr = &attr;
  388         imgp->entry_addr = 0;
  389         imgp->reloc_base = 0;
  390         imgp->vmspace_destroyed = 0;
  391         imgp->interpreted = 0;
  392         imgp->opened = 0;
  393         imgp->interpreter_name = NULL;
  394         imgp->auxargs = NULL;
  395         imgp->vp = NULL;
  396         imgp->object = NULL;
  397         imgp->firstpage = NULL;
  398         imgp->ps_strings = 0;
  399         imgp->auxarg_size = 0;
  400         imgp->args = args;
  401         imgp->execpath = imgp->freepath = NULL;
  402         imgp->execpathp = 0;
  403         imgp->canary = 0;
  404         imgp->canarylen = 0;
  405         imgp->pagesizes = 0;
  406         imgp->pagesizeslen = 0;
  407         imgp->stack_prot = 0;
  408 
  409 #ifdef MAC
  410         error = mac_execve_enter(imgp, mac_p);
  411         if (error)
  412                 goto exec_fail;
  413 #endif
  414 
  415         imgp->image_header = NULL;
  416 
  417         /*
  418          * Translate the file name. namei() returns a vnode pointer
   419          *      in ni_vp among other things.
  420          *
  421          * XXXAUDIT: It would be desirable to also audit the name of the
  422          * interpreter if this is an interpreted binary.
  423          */
  424         if (args->fname != NULL) {
  425                 NDINIT(&nd, LOOKUP, ISOPEN | LOCKLEAF | FOLLOW | SAVENAME
  426                     | MPSAFE | AUDITVNODE1, UIO_SYSSPACE, args->fname, td);
  427         }
  428 
  429         SDT_PROBE1(proc, kernel, , exec, args->fname);
  430 
  431 interpret:
  432         if (args->fname != NULL) {
  433 #ifdef CAPABILITY_MODE
  434                 /*
  435                  * While capability mode can't reach this point via direct
  436                  * path arguments to execve(), we also don't allow
  437                  * interpreters to be used in capability mode (for now).
  438                  * Catch indirect lookups and return a permissions error.
  439                  */
  440                 if (IN_CAPABILITY_MODE(td)) {
  441                         error = ECAPMODE;
  442                         goto exec_fail;
  443                 }
  444 #endif
  445                 error = namei(&nd);
  446                 if (error)
  447                         goto exec_fail;
  448 
  449                 vfslocked = NDHASGIANT(&nd);
  450                 binvp  = nd.ni_vp;
  451                 imgp->vp = binvp;
  452         } else {
  453                 AUDIT_ARG_FD(args->fd);
  454                 /*
  455                  * Some might argue that CAP_READ and/or CAP_MMAP should also
  456                  * be required here; such arguments will be entertained.
  457                  *
  458                  * Descriptors opened only with O_EXEC or O_RDONLY are allowed.
  459                  */
  460                 error = fgetvp_exec(td, args->fd, CAP_FEXECVE, &binvp);
  461                 if (error)
  462                         goto exec_fail;
  463                 vfslocked = VFS_LOCK_GIANT(binvp->v_mount);
  464                 vn_lock(binvp, LK_EXCLUSIVE | LK_RETRY);
  465                 AUDIT_ARG_VNODE1(binvp);
  466                 imgp->vp = binvp;
  467         }
  468 
  469         /*
  470          * Check file permissions (also 'opens' file)
  471          */
  472         error = exec_check_permissions(imgp);
  473         if (error)
  474                 goto exec_fail_dealloc;
  475 
  476         imgp->object = imgp->vp->v_object;
  477         if (imgp->object != NULL)
  478                 vm_object_reference(imgp->object);
  479 
  480         /*
  481          * Set VV_TEXT now so no one can write to the executable while we're
  482          * activating it.
  483          *
  484          * Remember if this was set before and unset it in case this is not
  485          * actually an executable image.
  486          */
  487         textset = VOP_IS_TEXT(imgp->vp);
  488         VOP_SET_TEXT(imgp->vp);
  489 
  490         error = exec_map_first_page(imgp);
  491         if (error)
  492                 goto exec_fail_dealloc;
  493 
  494         imgp->proc->p_osrel = 0;
  495         /*
  496          *      If the current process has a special image activator it
  497          *      wants to try first, call it.   For example, emulating shell
  498          *      scripts differently.
  499          */
  500         error = -1;
  501         if ((img_first = imgp->proc->p_sysent->sv_imgact_try) != NULL)
  502                 error = img_first(imgp);
  503 
  504         /*
  505          *      Loop through the list of image activators, calling each one.
  506          *      An activator returns -1 if there is no match, 0 on success,
  507          *      and an error otherwise.
  508          */
  509         for (i = 0; error == -1 && execsw[i]; ++i) {
  510                 if (execsw[i]->ex_imgact == NULL ||
  511                     execsw[i]->ex_imgact == img_first) {
  512                         continue;
  513                 }
  514                 error = (*execsw[i]->ex_imgact)(imgp);
  515         }
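               /*
                * Editorial note: a typical activator (cf. imgact_shell.c or
                * imgact_elf.c) inspects imgp->image_header and returns -1
                * when the magic number does not match, so the loop above
                * simply moves on to the next execsw[] entry.
                */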
  516 
  517         if (error) {
  518                 if (error == -1) {
  519                         if (textset == 0)
  520                                 VOP_UNSET_TEXT(imgp->vp);
  521                         error = ENOEXEC;
  522                 }
  523                 goto exec_fail_dealloc;
  524         }
  525 
  526         /*
   527          * Special interpreter operation: clean up and loop back to try to
  528          * activate the interpreter.
  529          */
  530         if (imgp->interpreted) {
  531                 exec_unmap_first_page(imgp);
  532                 /*
  533                  * VV_TEXT needs to be unset for scripts.  There is a short
  534                  * period before we determine that something is a script where
  535                  * VV_TEXT will be set. The vnode lock is held over this
  536                  * entire period so nothing should illegitimately be blocked.
  537                  */
  538                 VOP_UNSET_TEXT(imgp->vp);
  539                 /* free name buffer and old vnode */
  540                 if (args->fname != NULL)
  541                         NDFREE(&nd, NDF_ONLY_PNBUF);
  542 #ifdef MAC
  543                 mac_execve_interpreter_enter(binvp, &interpvplabel);
  544 #endif
  545                 if (imgp->opened) {
  546                         VOP_CLOSE(binvp, FREAD, td->td_ucred, td);
  547                         imgp->opened = 0;
  548                 }
  549                 vput(binvp);
  550                 vm_object_deallocate(imgp->object);
  551                 imgp->object = NULL;
  552                 VFS_UNLOCK_GIANT(vfslocked);
  553                 vfslocked = 0;
  554                 /* set new name to that of the interpreter */
  555                 NDINIT(&nd, LOOKUP, LOCKLEAF | FOLLOW | SAVENAME | MPSAFE,
  556                     UIO_SYSSPACE, imgp->interpreter_name, td);
  557                 args->fname = imgp->interpreter_name;
  558                 goto interpret;
  559         }
  560 
  561         /*
  562          * NB: We unlock the vnode here because it is believed that none
  563          * of the sv_copyout_strings/sv_fixup operations require the vnode.
  564          */
  565         VOP_UNLOCK(imgp->vp, 0);
  566 
  567         /*
  568          * Do the best to calculate the full path to the image file.
  569          */
  570         if (imgp->auxargs != NULL &&
  571             ((args->fname != NULL && args->fname[0] == '/') ||
  572              vn_fullpath(td, imgp->vp, &imgp->execpath, &imgp->freepath) != 0))
  573                 imgp->execpath = args->fname;
  574 
  575         if (disallow_high_osrel &&
  576             P_OSREL_MAJOR(p->p_osrel) > P_OSREL_MAJOR(__FreeBSD_version)) {
  577                 error = ENOEXEC;
  578                 uprintf("Osrel %d for image %s too high\n", p->p_osrel,
  579                     imgp->execpath != NULL ? imgp->execpath : "<unresolved>");
  580                 vn_lock(imgp->vp, LK_SHARED | LK_RETRY);
  581                 goto exec_fail_dealloc;
  582         }
  583 
  584         /*
  585          * Copy out strings (args and env) and initialize stack base
  586          */
  587         if (p->p_sysent->sv_copyout_strings)
  588                 stack_base = (*p->p_sysent->sv_copyout_strings)(imgp);
  589         else
  590                 stack_base = exec_copyout_strings(imgp);
  591 
  592         /*
   593          * If a custom stack fixup routine is present for this process,
   594          * let it do the stack setup.  Otherwise, push the argument
   595          * count as the first item on the stack.
  596          */
  597         if (p->p_sysent->sv_fixup != NULL)
  598                 (*p->p_sysent->sv_fixup)(&stack_base, imgp);
  599         else
  600                 suword(--stack_base, imgp->args->argc);
  601 
  602         /*
  603          * For security and other reasons, the file descriptor table cannot
  604          * be shared after an exec.
  605          */
  606         fdunshare(p, td);
  607 
  608         /*
  609          * Malloc things before we need locks.
  610          */
  611         newcred = crget();
  612         euip = uifind(attr.va_uid);
  613         i = imgp->args->begin_envv - imgp->args->begin_argv;
  614         /* Cache arguments if they fit inside our allowance */
  615         if (ps_arg_cache_limit >= i + sizeof(struct pargs)) {
  616                 newargs = pargs_alloc(i);
  617                 bcopy(imgp->args->begin_argv, newargs->ar_args, i);
  618         }
  619 
  620         /* close files on exec */
  621         fdcloseexec(td);
  622         vn_lock(imgp->vp, LK_SHARED | LK_RETRY);
  623 
  624         /* Get a reference to the vnode prior to locking the proc */
  625         VREF(binvp);
  626 
  627         /*
  628          * For security and other reasons, signal handlers cannot
  629          * be shared after an exec. The new process gets a copy of the old
  630          * handlers. In execsigs(), the new process will have its signals
  631          * reset.
  632          */
  633         PROC_LOCK(p);
  634         oldcred = crcopysafe(p, newcred);
  635         if (sigacts_shared(p->p_sigacts)) {
  636                 oldsigacts = p->p_sigacts;
  637                 PROC_UNLOCK(p);
  638                 newsigacts = sigacts_alloc();
  639                 sigacts_copy(newsigacts, oldsigacts);
  640                 PROC_LOCK(p);
  641                 p->p_sigacts = newsigacts;
  642         } else
  643                 oldsigacts = NULL;
  644 
  645         /* Stop profiling */
  646         stopprofclock(p);
  647 
  648         /* reset caught signals */
  649         execsigs(p);
  650 
  651         /* name this process - nameiexec(p, ndp) */
  652         bzero(p->p_comm, sizeof(p->p_comm));
  653         if (args->fname)
  654                 bcopy(nd.ni_cnd.cn_nameptr, p->p_comm,
  655                     min(nd.ni_cnd.cn_namelen, MAXCOMLEN));
  656         else if (vn_commname(binvp, p->p_comm, sizeof(p->p_comm)) != 0)
  657                 bcopy(fexecv_proc_title, p->p_comm, sizeof(fexecv_proc_title));
  658         bcopy(p->p_comm, td->td_name, sizeof(td->td_name));
  659 #ifdef KTR
  660         sched_clear_tdname(td);
  661 #endif
  662 
  663         /*
   664          * mark as execed, wake up the process that vforked (if any) and tell
  665          * it that it now has its own resources back
  666          */
  667         p->p_flag |= P_EXEC;
  668         if (p->p_pptr && (p->p_flag & P_PPWAIT)) {
  669                 p->p_flag &= ~(P_PPWAIT | P_PPTRACE);
  670                 cv_broadcast(&p->p_pwait);
  671         }
  672 
  673         /*
  674          * Implement image setuid/setgid.
  675          *
  676          * Don't honor setuid/setgid if the filesystem prohibits it or if
  677          * the process is being traced.
  678          *
  679          * We disable setuid/setgid/etc in compatibility mode on the basis
  680          * that most setugid applications are not written with that
  681          * environment in mind, and will therefore almost certainly operate
  682          * incorrectly. In principle there's no reason that setugid
  683          * applications might not be useful in capability mode, so we may want
  684          * to reconsider this conservative design choice in the future.
  685          *
  686          * XXXMAC: For the time being, use NOSUID to also prohibit
  687          * transitions on the file system.
  688          */
  689         credential_changing = 0;
  690         credential_changing |= (attr.va_mode & S_ISUID) && oldcred->cr_uid !=
  691             attr.va_uid;
  692         credential_changing |= (attr.va_mode & S_ISGID) && oldcred->cr_gid !=
  693             attr.va_gid;
  694 #ifdef MAC
  695         will_transition = mac_vnode_execve_will_transition(oldcred, imgp->vp,
  696             interpvplabel, imgp);
  697         credential_changing |= will_transition;
  698 #endif
  699 
  700         if (credential_changing &&
  701 #ifdef CAPABILITY_MODE
  702             ((oldcred->cr_flags & CRED_FLAG_CAPMODE) == 0) &&
  703 #endif
  704             (imgp->vp->v_mount->mnt_flag & MNT_NOSUID) == 0 &&
  705             (p->p_flag & P_TRACED) == 0) {
  706                 /*
  707                  * Turn off syscall tracing for set-id programs, except for
  708                  * root.  Record any set-id flags first to make sure that
  709                  * we do not regain any tracing during a possible block.
  710                  */
  711                 setsugid(p);
  712 
  713 #ifdef KTRACE
  714                 if (p->p_tracecred != NULL &&
  715                     priv_check_cred(p->p_tracecred, PRIV_DEBUG_DIFFCRED, 0))
  716                         ktrprocexec(p, &tracecred, &tracevp);
  717 #endif
  718                 /*
  719                  * Close any file descriptors 0..2 that reference procfs,
  720                  * then make sure file descriptors 0..2 are in use.
  721                  *
  722                  * setugidsafety() may call closef() and then pfind()
  723                  * which may grab the process lock.
  724                  * fdcheckstd() may call falloc() which may block to
  725                  * allocate memory, so temporarily drop the process lock.
  726                  */
  727                 PROC_UNLOCK(p);
  728                 VOP_UNLOCK(imgp->vp, 0);
  729                 setugidsafety(td);
  730                 error = fdcheckstd(td);
  731                 vn_lock(imgp->vp, LK_SHARED | LK_RETRY);
  732                 if (error != 0)
  733                         goto done1;
  734                 PROC_LOCK(p);
  735                 /*
  736                  * Set the new credentials.
  737                  */
  738                 if (attr.va_mode & S_ISUID)
  739                         change_euid(newcred, euip);
  740                 if (attr.va_mode & S_ISGID)
  741                         change_egid(newcred, attr.va_gid);
  742 #ifdef MAC
  743                 if (will_transition) {
  744                         mac_vnode_execve_transition(oldcred, newcred, imgp->vp,
  745                             interpvplabel, imgp);
  746                 }
  747 #endif
  748                 /*
  749                  * Implement correct POSIX saved-id behavior.
  750                  *
  751                  * XXXMAC: Note that the current logic will save the
  752                  * uid and gid if a MAC domain transition occurs, even
  753                  * though maybe it shouldn't.
  754                  */
  755                 change_svuid(newcred, newcred->cr_uid);
  756                 change_svgid(newcred, newcred->cr_gid);
  757                 p->p_ucred = newcred;
  758                 newcred = NULL;
  759         } else {
  760                 if (oldcred->cr_uid == oldcred->cr_ruid &&
  761                     oldcred->cr_gid == oldcred->cr_rgid)
  762                         p->p_flag &= ~P_SUGID;
  763                 /*
  764                  * Implement correct POSIX saved-id behavior.
  765                  *
  766                  * XXX: It's not clear that the existing behavior is
  767                  * POSIX-compliant.  A number of sources indicate that the
  768                  * saved uid/gid should only be updated if the new ruid is
  769                  * not equal to the old ruid, or the new euid is not equal
  770                  * to the old euid and the new euid is not equal to the old
  771                  * ruid.  The FreeBSD code always updates the saved uid/gid.
  772                  * Also, this code uses the new (replaced) euid and egid as
  773                  * the source, which may or may not be the right ones to use.
  774                  */
  775                 if (oldcred->cr_svuid != oldcred->cr_uid ||
  776                     oldcred->cr_svgid != oldcred->cr_gid) {
  777                         change_svuid(newcred, newcred->cr_uid);
  778                         change_svgid(newcred, newcred->cr_gid);
  779                         p->p_ucred = newcred;
  780                         newcred = NULL;
  781                 }
  782         }
  783 
  784         /*
  785          * Store the vp for use in procfs.  This vnode was referenced prior
  786          * to locking the proc lock.
  787          */
  788         textvp = p->p_textvp;
  789         p->p_textvp = binvp;
  790 
  791 #ifdef KDTRACE_HOOKS
  792         /*
  793          * Tell the DTrace fasttrap provider about the exec if it
  794          * has declared an interest.
  795          */
  796         if (dtrace_fasttrap_exec)
  797                 dtrace_fasttrap_exec(p);
  798 #endif
  799 
  800         /*
  801          * Notify others that we exec'd, and clear the P_INEXEC flag
  802          * as we're now a bona fide freshly-execed process.
  803          */
  804         KNOTE_LOCKED(&p->p_klist, NOTE_EXEC);
  805         p->p_flag &= ~P_INEXEC;
  806 
  807         /* clear "fork but no exec" flag, as we _are_ execing */
  808         p->p_acflag &= ~AFORK;
  809 
  810         /*
  811          * Free any previous argument cache and replace it with
  812          * the new argument cache, if any.
  813          */
  814         oldargs = p->p_args;
  815         p->p_args = newargs;
  816         newargs = NULL;
  817 
  818 #ifdef  HWPMC_HOOKS
  819         /*
  820          * Check if system-wide sampling is in effect or if the
  821          * current process is using PMCs.  If so, do exec() time
  822          * processing.  This processing needs to happen AFTER the
  823          * P_INEXEC flag is cleared.
  824          *
  825          * The proc lock needs to be released before taking the PMC
  826          * SX.
  827          */
  828         if (PMC_SYSTEM_SAMPLING_ACTIVE() || PMC_PROC_IS_USING_PMCS(p)) {
  829                 PROC_UNLOCK(p);
  830                 VOP_UNLOCK(imgp->vp, 0);
  831                 pe.pm_credentialschanged = credential_changing;
  832                 pe.pm_entryaddr = imgp->entry_addr;
  833 
  834                 PMC_CALL_HOOK_X(td, PMC_FN_PROCESS_EXEC, (void *) &pe);
  835                 vn_lock(imgp->vp, LK_SHARED | LK_RETRY);
  836         } else
  837                 PROC_UNLOCK(p);
  838 #else  /* !HWPMC_HOOKS */
  839         PROC_UNLOCK(p);
  840 #endif
  841 
  842         /* Set values passed into the program in registers. */
  843         if (p->p_sysent->sv_setregs)
  844                 (*p->p_sysent->sv_setregs)(td, imgp, 
  845                     (u_long)(uintptr_t)stack_base);
  846         else
  847                 exec_setregs(td, imgp, (u_long)(uintptr_t)stack_base);
  848 
  849         vfs_mark_atime(imgp->vp, td->td_ucred);
  850 
  851         SDT_PROBE1(proc, kernel, , exec__success, args->fname);
  852 
  853 done1:
  854         /*
  855          * Free any resources malloc'd earlier that we didn't use.
  856          */
  857         uifree(euip);
  858         if (newcred == NULL)
  859                 crfree(oldcred);
  860         else
  861                 crfree(newcred);
  862         VOP_UNLOCK(imgp->vp, 0);
  863 
  864         /*
  865          * Handle deferred decrement of ref counts.
  866          */
  867         if (textvp != NULL) {
  868                 int tvfslocked;
  869 
  870                 tvfslocked = VFS_LOCK_GIANT(textvp->v_mount);
  871                 vrele(textvp);
  872                 VFS_UNLOCK_GIANT(tvfslocked);
  873         }
  874         if (binvp && error != 0)
  875                 vrele(binvp);
  876 #ifdef KTRACE
  877         if (tracevp != NULL) {
  878                 int tvfslocked;
  879 
  880                 tvfslocked = VFS_LOCK_GIANT(tracevp->v_mount);
  881                 vrele(tracevp);
  882                 VFS_UNLOCK_GIANT(tvfslocked);
  883         }
  884         if (tracecred != NULL)
  885                 crfree(tracecred);
  886 #endif
  887         vn_lock(imgp->vp, LK_SHARED | LK_RETRY);
  888         pargs_drop(oldargs);
  889         pargs_drop(newargs);
  890         if (oldsigacts != NULL)
  891                 sigacts_free(oldsigacts);
  892 
  893 exec_fail_dealloc:
  894 
  895         /*
  896          * free various allocated resources
  897          */
  898         if (imgp->firstpage != NULL)
  899                 exec_unmap_first_page(imgp);
  900 
  901         if (imgp->vp != NULL) {
  902                 if (args->fname)
  903                         NDFREE(&nd, NDF_ONLY_PNBUF);
  904                 if (imgp->opened)
  905                         VOP_CLOSE(imgp->vp, FREAD, td->td_ucred, td);
  906                 vput(imgp->vp);
  907         }
  908 
  909         if (imgp->object != NULL)
  910                 vm_object_deallocate(imgp->object);
  911 
  912         free(imgp->freepath, M_TEMP);
  913 
  914         if (error == 0) {
  915                 PROC_LOCK(p);
  916                 td->td_dbgflags |= TDB_EXEC;
  917                 PROC_UNLOCK(p);
  918 
  919                 /*
  920                  * Stop the process here if its stop event mask has
  921                  * the S_EXEC bit set.
  922                  */
  923                 STOPEVENT(p, S_EXEC, 0);
  924                 goto done2;
  925         }
  926 
  927 exec_fail:
  928         /* we're done here, clear P_INEXEC */
  929         PROC_LOCK(p);
  930         p->p_flag &= ~P_INEXEC;
  931         PROC_UNLOCK(p);
  932 
  933         SDT_PROBE1(proc, kernel, , exec__failure, error);
  934 
  935 done2:
  936 #ifdef MAC
  937         mac_execve_exit(imgp);
  938         mac_execve_interpreter_exit(interpvplabel);
  939 #endif
  940         VFS_UNLOCK_GIANT(vfslocked);
  941         exec_free_args(args);
  942 
  943         if (error && imgp->vmspace_destroyed) {
   944                 /* sorry, no process left to return to; exit gracefully */
  945                 exit1(td, W_EXITCODE(0, SIGABRT));
  946                 /* NOT REACHED */
  947         }
  948 
  949 #ifdef KTRACE
  950         if (error == 0)
  951                 ktrprocctor(p);
  952 #endif
  953 
  954         return (error);
  955 }
  956 
  957 int
  958 exec_map_first_page(imgp)
  959         struct image_params *imgp;
  960 {
  961         int rv, i;
  962         int initial_pagein;
  963         vm_page_t ma[VM_INITIAL_PAGEIN];
  964         vm_object_t object;
  965 
  966         if (imgp->firstpage != NULL)
  967                 exec_unmap_first_page(imgp);
  968 
  969         object = imgp->vp->v_object;
  970         if (object == NULL)
  971                 return (EACCES);
  972         VM_OBJECT_LOCK(object);
  973 #if VM_NRESERVLEVEL > 0
  974         if ((object->flags & OBJ_COLORED) == 0) {
  975                 object->flags |= OBJ_COLORED;
  976                 object->pg_color = 0;
  977         }
  978 #endif
  979         ma[0] = vm_page_grab(object, 0, VM_ALLOC_NORMAL | VM_ALLOC_RETRY);
  980         if (ma[0]->valid != VM_PAGE_BITS_ALL) {
  981                 initial_pagein = VM_INITIAL_PAGEIN;
  982                 if (initial_pagein > object->size)
  983                         initial_pagein = object->size;
  984                 for (i = 1; i < initial_pagein; i++) {
  985                         if ((ma[i] = vm_page_next(ma[i - 1])) != NULL) {
  986                                 if (ma[i]->valid)
  987                                         break;
  988                                 if ((ma[i]->oflags & VPO_BUSY) || ma[i]->busy)
  989                                         break;
  990                                 vm_page_busy(ma[i]);
  991                         } else {
  992                                 ma[i] = vm_page_alloc(object, i,
  993                                     VM_ALLOC_NORMAL | VM_ALLOC_IFNOTCACHED);
  994                                 if (ma[i] == NULL)
  995                                         break;
  996                         }
  997                 }
  998                 initial_pagein = i;
  999                 rv = vm_pager_get_pages(object, ma, initial_pagein, 0);
 1000                 ma[0] = vm_page_lookup(object, 0);
 1001                 if ((rv != VM_PAGER_OK) || (ma[0] == NULL)) {
 1002                         if (ma[0] != NULL) {
 1003                                 vm_page_lock(ma[0]);
 1004                                 vm_page_free(ma[0]);
 1005                                 vm_page_unlock(ma[0]);
 1006                         }
 1007                         VM_OBJECT_UNLOCK(object);
 1008                         return (EIO);
 1009                 }
 1010         }
 1011         vm_page_lock(ma[0]);
 1012         vm_page_hold(ma[0]);
 1013         vm_page_unlock(ma[0]);
 1014         vm_page_wakeup(ma[0]);
 1015         VM_OBJECT_UNLOCK(object);
 1016 
 1017         imgp->firstpage = sf_buf_alloc(ma[0], 0);
 1018         imgp->image_header = (char *)sf_buf_kva(imgp->firstpage);
 1019 
 1020         return (0);
 1021 }
 1022 
 1023 void
 1024 exec_unmap_first_page(imgp)
 1025         struct image_params *imgp;
 1026 {
 1027         vm_page_t m;
 1028 
 1029         if (imgp->firstpage != NULL) {
 1030                 m = sf_buf_page(imgp->firstpage);
 1031                 sf_buf_free(imgp->firstpage);
 1032                 imgp->firstpage = NULL;
 1033                 vm_page_lock(m);
 1034                 vm_page_unhold(m);
 1035                 vm_page_unlock(m);
 1036         }
 1037 }
 1038 
 1039 /*
 1040  * Destroy old address space, and allocate a new stack
 1041  *      The new stack is only SGROWSIZ large because it is grown
 1042  *      automatically in trap.c.
 1043  */
 1044 int
 1045 exec_new_vmspace(imgp, sv)
 1046         struct image_params *imgp;
 1047         struct sysentvec *sv;
 1048 {
 1049         int error;
 1050         struct proc *p = imgp->proc;
 1051         struct vmspace *vmspace = p->p_vmspace;
 1052         vm_object_t obj;
 1053         vm_offset_t sv_minuser, stack_addr;
 1054         vm_map_t map;
 1055         u_long ssiz;
 1056 
 1057         imgp->vmspace_destroyed = 1;
 1058         imgp->sysent = sv;
 1059 
 1060         /* May be called with Giant held */
 1061         EVENTHANDLER_INVOKE(process_exec, p, imgp);
 1062 
 1063         /*
  1064          * Blow away the entire process VM if the address space is not
  1065          * shared; otherwise, create a new VM space so that other
  1066          * threads are not disrupted.
 1067          */
 1068         map = &vmspace->vm_map;
 1069         if (map_at_zero)
 1070                 sv_minuser = sv->sv_minuser;
 1071         else
 1072                 sv_minuser = MAX(sv->sv_minuser, PAGE_SIZE);
 1073         if (vmspace->vm_refcnt == 1 && vm_map_min(map) == sv_minuser &&
 1074             vm_map_max(map) == sv->sv_maxuser) {
 1075                 shmexit(vmspace);
 1076                 pmap_remove_pages(vmspace_pmap(vmspace));
 1077                 vm_map_remove(map, vm_map_min(map), vm_map_max(map));
 1078         } else {
 1079                 error = vmspace_exec(p, sv_minuser, sv->sv_maxuser);
 1080                 if (error)
 1081                         return (error);
 1082                 vmspace = p->p_vmspace;
 1083                 map = &vmspace->vm_map;
 1084         }
 1085 
 1086         /* Map a shared page */
 1087         obj = sv->sv_shared_page_obj;
 1088         if (obj != NULL) {
 1089                 vm_object_reference(obj);
 1090                 error = vm_map_fixed(map, obj, 0,
 1091                     sv->sv_shared_page_base, sv->sv_shared_page_len,
 1092                     VM_PROT_READ | VM_PROT_EXECUTE,
 1093                     VM_PROT_READ | VM_PROT_EXECUTE,
 1094                     MAP_INHERIT_SHARE | MAP_ACC_NO_CHARGE);
 1095                 if (error) {
 1096                         vm_object_deallocate(obj);
 1097                         return (error);
 1098                 }
 1099         }
 1100 
 1101         /* Allocate a new stack */
 1102         if (sv->sv_maxssiz != NULL)
 1103                 ssiz = *sv->sv_maxssiz;
 1104         else
 1105                 ssiz = maxssiz;
 1106         stack_addr = sv->sv_usrstack - ssiz;
 1107         error = vm_map_stack(map, stack_addr, (vm_size_t)ssiz,
 1108             obj != NULL && imgp->stack_prot != 0 ? imgp->stack_prot :
 1109                 sv->sv_stackprot,
 1110             VM_PROT_ALL, MAP_STACK_GROWS_DOWN);
 1111         if (error)
 1112                 return (error);
 1113 
 1114 #ifdef __ia64__
 1115         /* Allocate a new register stack */
 1116         stack_addr = IA64_BACKINGSTORE;
 1117         error = vm_map_stack(map, stack_addr, (vm_size_t)ssiz,
 1118             sv->sv_stackprot, VM_PROT_ALL, MAP_STACK_GROWS_UP);
 1119         if (error)
 1120                 return (error);
 1121 #endif
 1122 
 1123         /* vm_ssize and vm_maxsaddr are somewhat antiquated concepts in the
 1124          * VM_STACK case, but they are still used to monitor the size of the
 1125          * process stack so we can check the stack rlimit.
 1126          */
 1127         vmspace->vm_ssize = sgrowsiz >> PAGE_SHIFT;
 1128         vmspace->vm_maxsaddr = (char *)sv->sv_usrstack - ssiz;
 1129 
 1130         return (0);
 1131 }
 1132 
 1133 /*
  1134  * Copy in the argument and environment strings from the old process address
 1135  * space into the temporary string buffer.
 1136  */
 1137 int
 1138 exec_copyin_args(struct image_args *args, char *fname,
 1139     enum uio_seg segflg, char **argv, char **envv)
 1140 {
 1141         char *argp, *envp;
 1142         int error;
 1143         size_t length;
 1144 
 1145         bzero(args, sizeof(*args));
 1146         if (argv == NULL)
 1147                 return (EFAULT);
 1148 
 1149         /*
 1150          * Allocate demand-paged memory for the file name, argument, and
 1151          * environment strings.
 1152          */
 1153         error = exec_alloc_args(args);
 1154         if (error != 0)
 1155                 return (error);
 1156 
 1157         /*
 1158          * Copy the file name.
 1159          */
 1160         if (fname != NULL) {
 1161                 args->fname = args->buf;
 1162                 error = (segflg == UIO_SYSSPACE) ?
 1163                     copystr(fname, args->fname, PATH_MAX, &length) :
 1164                     copyinstr(fname, args->fname, PATH_MAX, &length);
 1165                 if (error != 0)
 1166                         goto err_exit;
 1167         } else
 1168                 length = 0;
 1169 
 1170         args->begin_argv = args->buf + length;
 1171         args->endp = args->begin_argv;
 1172         args->stringspace = ARG_MAX;
 1173 
 1174         /*
 1175          * extract arguments first
 1176          */
 1177         while ((argp = (caddr_t) (intptr_t) fuword(argv++))) {
 1178                 if (argp == (caddr_t) -1) {
 1179                         error = EFAULT;
 1180                         goto err_exit;
 1181                 }
 1182                 if ((error = copyinstr(argp, args->endp,
 1183                     args->stringspace, &length))) {
 1184                         if (error == ENAMETOOLONG) 
 1185                                 error = E2BIG;
 1186                         goto err_exit;
 1187                 }
 1188                 args->stringspace -= length;
 1189                 args->endp += length;
 1190                 args->argc++;
 1191         }
 1192 
 1193         args->begin_envv = args->endp;
 1194 
 1195         /*
 1196          * extract environment strings
 1197          */
 1198         if (envv) {
 1199                 while ((envp = (caddr_t)(intptr_t)fuword(envv++))) {
 1200                         if (envp == (caddr_t)-1) {
 1201                                 error = EFAULT;
 1202                                 goto err_exit;
 1203                         }
 1204                         if ((error = copyinstr(envp, args->endp,
 1205                             args->stringspace, &length))) {
 1206                                 if (error == ENAMETOOLONG)
 1207                                         error = E2BIG;
 1208                                 goto err_exit;
 1209                         }
 1210                         args->stringspace -= length;
 1211                         args->endp += length;
 1212                         args->envc++;
 1213                 }
 1214         }
 1215 
 1216         return (0);
 1217 
 1218 err_exit:
 1219         exec_free_args(args);
 1220         return (error);
 1221 }
 1222 
 1223 /*
 1224  * Allocate temporary demand-paged, zero-filled memory for the file name,
 1225  * argument, and environment strings.  Returns zero if the allocation succeeds
 1226  * and ENOMEM otherwise.
 1227  */
 1228 int
 1229 exec_alloc_args(struct image_args *args)
 1230 {
 1231 
 1232         args->buf = (char *)kmem_alloc_wait(exec_map, PATH_MAX + ARG_MAX);
 1233         return (args->buf != NULL ? 0 : ENOMEM);
 1234 }
 1235 
 1236 void
 1237 exec_free_args(struct image_args *args)
 1238 {
 1239 
 1240         if (args->buf != NULL) {
 1241                 kmem_free_wakeup(exec_map, (vm_offset_t)args->buf,
 1242                     PATH_MAX + ARG_MAX);
 1243                 args->buf = NULL;
 1244         }
 1245         if (args->fname_buf != NULL) {
 1246                 free(args->fname_buf, M_TEMP);
 1247                 args->fname_buf = NULL;
 1248         }
 1249 }
 1250 
 1251 /*
 1252  * Copy strings out to the new process address space, constructing new arg
 1253  * and env vector tables. Return a pointer to the base so that it can be used
 1254  * as the initial stack pointer.
 1255  */
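       /*
        * Editorial sketch of the resulting layout, from the top of the new
        * user stack downward (details vary with the ABI and sysentvec):
        *
        *	ps_strings structure (at sv_psstrings)
        *	signal trampoline code (only if there is no shared-page sigcode)
        *	execpath string, SSP canary, pagesizes array
        *	argument and environment strings
        *	room for the ELF auxiliary vector (if imgp->auxargs != NULL)
        *	NULL-terminated environment pointer vector
        *	NULL-terminated argument pointer vector   <- returned stack_base
        *	argc (pushed later by do_execve() when there is no sv_fixup)
        */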
 1256 register_t *
 1257 exec_copyout_strings(imgp)
 1258         struct image_params *imgp;
 1259 {
 1260         int argc, envc;
 1261         char **vectp;
 1262         char *stringp;
 1263         uintptr_t destp;
 1264         register_t *stack_base;
 1265         struct ps_strings *arginfo;
 1266         struct proc *p;
 1267         size_t execpath_len;
 1268         int szsigcode, szps;
 1269         char canary[sizeof(long) * 8];
 1270 
 1271         szps = sizeof(pagesizes[0]) * MAXPAGESIZES;
 1272         /*
 1273          * Calculate string base and vector table pointers.
 1274          * Also deal with signal trampoline code for this exec type.
 1275          */
 1276         if (imgp->execpath != NULL && imgp->auxargs != NULL)
 1277                 execpath_len = strlen(imgp->execpath) + 1;
 1278         else
 1279                 execpath_len = 0;
 1280         p = imgp->proc;
 1281         szsigcode = 0;
 1282         arginfo = (struct ps_strings *)p->p_sysent->sv_psstrings;
 1283         if (p->p_sysent->sv_sigcode_base == 0) {
 1284                 if (p->p_sysent->sv_szsigcode != NULL)
 1285                         szsigcode = *(p->p_sysent->sv_szsigcode);
 1286         }
 1287         destp = (uintptr_t)arginfo;
 1288 
 1289         /*
 1290          * install sigcode
 1291          */
 1292         if (szsigcode != 0) {
 1293                 destp -= szsigcode;
 1294                 destp = rounddown2(destp, sizeof(void *));
 1295                 copyout(p->p_sysent->sv_sigcode, (void *)destp, szsigcode);
 1296         }
 1297 
 1298         /*
 1299          * Copy the image path for the rtld.
 1300          */
 1301         if (execpath_len != 0) {
 1302                 destp -= execpath_len;
 1303                 imgp->execpathp = destp;
 1304                 copyout(imgp->execpath, (void *)destp, execpath_len);
 1305         }
 1306 
 1307         /*
 1308          * Prepare the canary for SSP.
 1309          */
 1310         arc4rand(canary, sizeof(canary), 0);
 1311         destp -= sizeof(canary);
 1312         imgp->canary = destp;
 1313         copyout(canary, (void *)destp, sizeof(canary));
 1314         imgp->canarylen = sizeof(canary);
 1315 
 1316         /*
 1317          * Prepare the pagesizes array.
 1318          */
 1319         destp -= szps;
 1320         destp = rounddown2(destp, sizeof(void *));
 1321         imgp->pagesizes = destp;
 1322         copyout(pagesizes, (void *)destp, szps);
 1323         imgp->pagesizeslen = szps;
 1324 
 1325         destp -= ARG_MAX - imgp->args->stringspace;
 1326         destp = rounddown2(destp, sizeof(void *));
 1327 
 1328         /*
 1329          * If we have a valid auxargs ptr, prepare some room
 1330          * on the stack.
 1331          */
 1332         if (imgp->auxargs) {
 1333                 /*
  1334                  * 'AT_COUNT*2' is the size of the ELF Auxargs data.  It is
  1335                  * retained at this size for backward compatibility.
 1336                  */
 1337                 imgp->auxarg_size = (imgp->auxarg_size) ? imgp->auxarg_size :
 1338                     (AT_COUNT * 2);
 1339                 /*
 1340                  * The '+ 2' is for the null pointers at the end of each of
  1341                  * the arg and env vector sets, and imgp->auxarg_size is room
  1342                  * for the arguments of the runtime loader.
 1343                  */
 1344                 vectp = (char **)(destp - (imgp->args->argc +
 1345                     imgp->args->envc + 2 + imgp->auxarg_size)
 1346                     * sizeof(char *));
 1347         } else {
 1348                 /*
 1349                  * The '+ 2' is for the null pointers at the end of each of
 1350                  * the arg and env vector sets
 1351                  */
 1352                 vectp = (char **)(destp - (imgp->args->argc + imgp->args->envc
 1353                     + 2) * sizeof(char *));
 1354         }
 1355 
 1356         /*
 1357          * vectp also becomes our initial stack base
 1358          */
 1359         stack_base = (register_t *)vectp;
 1360 
 1361         stringp = imgp->args->begin_argv;
 1362         argc = imgp->args->argc;
 1363         envc = imgp->args->envc;
 1364 
 1365         /*
 1366          * Copy out strings - arguments and environment.
 1367          */
 1368         copyout(stringp, (void *)destp, ARG_MAX - imgp->args->stringspace);
 1369 
 1370         /*
 1371          * Fill in "ps_strings" struct for ps, w, etc.
 1372          */
 1373         suword(&arginfo->ps_argvstr, (long)(intptr_t)vectp);
 1374         suword32(&arginfo->ps_nargvstr, argc);
 1375 
 1376         /*
 1377          * Fill in argument portion of vector table.
 1378          */
 1379         for (; argc > 0; --argc) {
 1380                 suword(vectp++, (long)(intptr_t)destp);
 1381                 while (*stringp++ != 0)
 1382                         destp++;
 1383                 destp++;
 1384         }
 1385 
 1386         /* a null vector table pointer separates the argp's from the envp's */
 1387         suword(vectp++, 0);
 1388 
 1389         suword(&arginfo->ps_envstr, (long)(intptr_t)vectp);
 1390         suword32(&arginfo->ps_nenvstr, envc);
 1391 
 1392         /*
 1393          * Fill in environment portion of vector table.
 1394          */
 1395         for (; envc > 0; --envc) {
 1396                 suword(vectp++, (long)(intptr_t)destp);
 1397                 while (*stringp++ != 0)
 1398                         destp++;
 1399                 destp++;
 1400         }
 1401 
 1402         /* end of vector table is a null pointer */
 1403         suword(vectp, 0);
 1404 
 1405         return (stack_base);
 1406 }
 1407 
 1408 /*
 1409  * Check permissions of file to execute.
 1410  *      Called with imgp->vp locked.
 1411  *      Return 0 for success or error code on failure.
 1412  */
 1413 int
 1414 exec_check_permissions(imgp)
 1415         struct image_params *imgp;
 1416 {
 1417         struct vnode *vp = imgp->vp;
 1418         struct vattr *attr = imgp->attr;
 1419         struct thread *td;
 1420         int error, writecount;
 1421 
 1422         td = curthread;
 1423 
 1424         /* Get file attributes */
 1425         error = VOP_GETATTR(vp, attr, td->td_ucred);
 1426         if (error)
 1427                 return (error);
 1428 
 1429 #ifdef MAC
 1430         error = mac_vnode_check_exec(td->td_ucred, imgp->vp, imgp);
 1431         if (error)
 1432                 return (error);
 1433 #endif
 1434 
 1435         /*
 1436          * 1) Check if file execution is disabled for the filesystem that
 1437          *    this file resides on.
 1438          * 2) Ensure that at least one execute bit is on. Otherwise, a
 1439          *    privileged user will always succeed, and we don't want this
 1440          *    to happen unless the file really is executable.
 1441          * 3) Ensure that the file is a regular file.
 1442          */
 1443         if ((vp->v_mount->mnt_flag & MNT_NOEXEC) ||
 1444             (attr->va_mode & (S_IXUSR | S_IXGRP | S_IXOTH)) == 0 ||
 1445             (attr->va_type != VREG))
 1446                 return (EACCES);
 1447 
 1448         /*
 1449          * Zero length files can't be exec'd
 1450          */
 1451         if (attr->va_size == 0)
 1452                 return (ENOEXEC);
 1453 
 1454         /*
 1455          *  Check for execute permission to file based on current credentials.
 1456          */
 1457         error = VOP_ACCESS(vp, VEXEC, td->td_ucred, td);
 1458         if (error)
 1459                 return (error);
 1460 
 1461         /*
 1462          * Check number of open-for-writes on the file and deny execution
 1463          * if there are any.
 1464          */
 1465         error = VOP_GET_WRITECOUNT(vp, &writecount);
 1466         if (error != 0)
 1467                 return (error);
 1468         if (writecount != 0)
 1469                 return (ETXTBSY);
 1470 
 1471         /*
 1472          * Call filesystem specific open routine (which does nothing in the
 1473          * general case).
 1474          */
 1475         error = VOP_OPEN(vp, FREAD, td->td_ucred, td, NULL);
 1476         if (error == 0)
 1477                 imgp->opened = 1;
 1478         return (error);
 1479 }
 1480 
 1481 /*
 1482  * Exec handler registration
 1483  */
 1484 int
 1485 exec_register(execsw_arg)
 1486         const struct execsw *execsw_arg;
 1487 {
 1488         const struct execsw **es, **xs, **newexecsw;
 1489         int count = 2;  /* New slot and trailing NULL */
 1490 
 1491         if (execsw)
 1492                 for (es = execsw; *es; es++)
 1493                         count++;
 1494         newexecsw = malloc(count * sizeof(*es), M_TEMP, M_WAITOK);
 1495         if (newexecsw == NULL)
 1496                 return (ENOMEM);
 1497         xs = newexecsw;
 1498         if (execsw)
 1499                 for (es = execsw; *es; es++)
 1500                         *xs++ = *es;
 1501         *xs++ = execsw_arg;
 1502         *xs = NULL;
 1503         if (execsw)
 1504                 free(execsw, M_TEMP);
 1505         execsw = newexecsw;
 1506         return (0);
 1507 }
 1508 
 1509 int
 1510 exec_unregister(execsw_arg)
 1511         const struct execsw *execsw_arg;
 1512 {
 1513         const struct execsw **es, **xs, **newexecsw;
 1514         int count = 1;
 1515 
 1516         if (execsw == NULL)
 1517                 panic("unregister with no handlers left?\n");
 1518 
 1519         for (es = execsw; *es; es++) {
 1520                 if (*es == execsw_arg)
 1521                         break;
 1522         }
 1523         if (*es == NULL)
 1524                 return (ENOENT);
 1525         for (es = execsw; *es; es++)
 1526                 if (*es != execsw_arg)
 1527                         count++;
 1528         newexecsw = malloc(count * sizeof(*es), M_TEMP, M_WAITOK);
 1529         if (newexecsw == NULL)
 1530                 return (ENOMEM);
 1531         xs = newexecsw;
 1532         for (es = execsw; *es; es++)
 1533                 if (*es != execsw_arg)
 1534                         *xs++ = *es;
 1535         *xs = NULL;
 1536         if (execsw)
 1537                 free(execsw, M_TEMP);
 1538         execsw = newexecsw;
 1539         return (0);
 1540 }
