
FreeBSD/Linux Kernel Cross Reference
sys/kern/imgact_elf.c


    1 /*-
    2  * Copyright (c) 2000 David O'Brien
    3  * Copyright (c) 1995-1996 Søren Schmidt
    4  * Copyright (c) 1996 Peter Wemm
    5  * All rights reserved.
    6  *
    7  * Redistribution and use in source and binary forms, with or without
    8  * modification, are permitted provided that the following conditions
    9  * are met:
   10  * 1. Redistributions of source code must retain the above copyright
   11  *    notice, this list of conditions and the following disclaimer
   12  *    in this position and unchanged.
   13  * 2. Redistributions in binary form must reproduce the above copyright
   14  *    notice, this list of conditions and the following disclaimer in the
   15  *    documentation and/or other materials provided with the distribution.
   16  * 3. The name of the author may not be used to endorse or promote products
   17  *    derived from this software without specific prior written permission
   18  *
   19  * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
   20  * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
   21  * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
   22  * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
   23  * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
   24  * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
   25  * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
   26  * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
   27  * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
   28  * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
   29  *
   30  * $FreeBSD: releng/5.1/sys/kern/imgact_elf.c 111119 2003-02-19 05:47:46Z imp $
   31  */
   32 
   33 #include <sys/param.h>
   34 #include <sys/exec.h>
   35 #include <sys/fcntl.h>
   36 #include <sys/imgact.h>
   37 #include <sys/imgact_elf.h>
   38 #include <sys/kernel.h>
   39 #include <sys/lock.h>
   40 #include <sys/malloc.h>
   41 #include <sys/mutex.h>
   42 #include <sys/mman.h>
   43 #include <sys/namei.h>
   44 #include <sys/pioctl.h>
   45 #include <sys/proc.h>
   46 #include <sys/procfs.h>
   47 #include <sys/resourcevar.h>
   48 #include <sys/systm.h>
   49 #include <sys/signalvar.h>
   50 #include <sys/stat.h>
   51 #include <sys/sx.h>
   52 #include <sys/syscall.h>
   53 #include <sys/sysctl.h>
   54 #include <sys/sysent.h>
   55 #include <sys/vnode.h>
   56 
   57 #include <vm/vm.h>
   58 #include <vm/vm_kern.h>
   59 #include <vm/vm_param.h>
   60 #include <vm/pmap.h>
   61 #include <vm/vm_map.h>
   62 #include <vm/vm_object.h>
   63 #include <vm/vm_extern.h>
   64 
   65 #include <machine/elf.h>
   66 #include <machine/md_var.h>
   67 
   68 #define OLD_EI_BRAND    8
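       /*
        * OLD_EI_BRAND is the byte offset into e_ident[] at which FreeBSD 3.x
        * toolchains wrote an ASCII brand string such as "FreeBSD"; it is the
        * string that the compat_3_brand comparison in __elfN(get_brandinfo)()
        * below reads.
        */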
   69 
   70 static int __elfN(check_header)(const Elf_Ehdr *hdr);
   71 static Elf_Brandinfo *__elfN(get_brandinfo)(const Elf_Ehdr *hdr,
   72     const char *interp);
   73 static int __elfN(load_file)(struct proc *p, const char *file, u_long *addr,
   74     u_long *entry, size_t pagesize);
   75 static int __elfN(load_section)(struct proc *p,
   76     struct vmspace *vmspace, struct vnode *vp, vm_object_t object,
   77     vm_offset_t offset, caddr_t vmaddr, size_t memsz, size_t filsz,
   78     vm_prot_t prot, size_t pagesize);
   79 static int __CONCAT(exec_, __elfN(imgact))(struct image_params *imgp);
   80 
   81 SYSCTL_NODE(_kern, OID_AUTO, __CONCAT(elf, __ELF_WORD_SIZE), CTLFLAG_RW, 0,
   82     "");
   83 
   84 int __elfN(fallback_brand) = -1;
   85 SYSCTL_INT(__CONCAT(_kern_elf, __ELF_WORD_SIZE), OID_AUTO,
   86     fallback_brand, CTLFLAG_RW, &__elfN(fallback_brand), 0,
   87     __XSTRING(__CONCAT(ELF, __ELF_WORD_SIZE)) " brand of last resort");
   88 TUNABLE_INT("kern.elf" __XSTRING(__ELF_WORD_SIZE) ".fallback_brand",
   89     &__elfN(fallback_brand));
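       /*
        * Usage note: with __ELF_WORD_SIZE == 32 the declarations above become
        * the "kern.elf32.fallback_brand" sysctl and loader tunable.  Setting
        * it to an ELF OSABI number makes binaries with no recognizable brand
        * fall back to that ABI, e.g. for unbranded Linux executables:
        *
        *	sysctl kern.elf32.fallback_brand=3	# 3 == ELFOSABI_LINUX
        */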
   90 
   91 static int elf_trace = 0;
   92 SYSCTL_INT(_debug, OID_AUTO, __elfN(trace), CTLFLAG_RW, &elf_trace, 0, "");
   93 
   94 static int elf_legacy_coredump = 0;
   95 SYSCTL_INT(_debug, OID_AUTO, __elfN(legacy_coredump), CTLFLAG_RW, 
   96     &elf_legacy_coredump, 0, "");
   97 
   98 static Elf_Brandinfo *elf_brand_list[MAX_BRANDS];
   99 
  100 int
  101 __elfN(insert_brand_entry)(Elf_Brandinfo *entry)
  102 {
  103         int i;
  104 
  105         for (i = 0; i < MAX_BRANDS; i++) {
  106                 if (elf_brand_list[i] == NULL) {
  107                         elf_brand_list[i] = entry;
  108                         break;
  109                 }
  110         }
  111         if (i == MAX_BRANDS)
  112                 return (-1);
  113         return (0);
  114 }
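       /*
        * Registration sketch (not part of this file): an ABI fills in an
        * Elf_Brandinfo and hands it to the routine above at boot, roughly as
        * the machine-dependent native-FreeBSD code does.  Field names follow
        * sys/imgact_elf.h; the values here are illustrative.
        *
        *	static Elf32_Brandinfo freebsd_brand_info = {
        *		.brand		= ELFOSABI_FREEBSD,
        *		.machine	= EM_386,
        *		.compat_3_brand	= "FreeBSD",
        *		.emul_path	= "",
        *		.interp_path	= "/usr/libexec/ld-elf.so.1",
        *		.sysvec		= &elf32_freebsd_sysvec,
        *	};
        *	SYSINIT(elf32, SI_SUB_EXEC, SI_ORDER_ANY,
        *	    (sysinit_cfunc_t)elf32_insert_brand_entry,
        *	    &freebsd_brand_info);
        */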
  115 
  116 int
  117 __elfN(remove_brand_entry)(Elf_Brandinfo *entry)
  118 {
  119         int i;
  120 
  121         for (i = 0; i < MAX_BRANDS; i++) {
  122                 if (elf_brand_list[i] == entry) {
  123                         elf_brand_list[i] = NULL;
  124                         break;
  125                 }
  126         }
  127         if (i == MAX_BRANDS)
  128                 return (-1);
  129         return (0);
  130 }
  131 
  132 int
  133 __elfN(brand_inuse)(Elf_Brandinfo *entry)
  134 {
  135         struct proc *p;
  136         int rval = FALSE;
  137 
  138         sx_slock(&allproc_lock);
  139         LIST_FOREACH(p, &allproc, p_list) {
  140                 if (p->p_sysent == entry->sysvec) {
  141                         rval = TRUE;
  142                         break;
  143                 }
  144         }
  145         sx_sunlock(&allproc_lock);
  146 
  147         return (rval);
  148 }
  149 
  150 static Elf_Brandinfo *
  151 __elfN(get_brandinfo)(const Elf_Ehdr *hdr, const char *interp)
  152 {
  153         Elf_Brandinfo *bi;
  154         int i;
  155 
  156         /*
  157          * We support three types of branding -- (1) the ELF EI_OSABI field
  158          * that SCO added to the ELF spec, (2) FreeBSD 3.x's traditional string
   159          * branding w/in the ELF header, and (3) the interpreter path,
   160          * matched against the `interp_path' field.  We should also look for
   161          * an ".note.ABI-tag" ELF section (Linux, FreeBSD 4.1+, some NetBSD).
  162          */
  163 
  164         /* If the executable has a brand, search for it in the brand list. */
  165         for (i = 0; i < MAX_BRANDS; i++) {
  166                 bi = elf_brand_list[i];
  167                 if (bi != NULL && hdr->e_machine == bi->machine &&
  168                     (hdr->e_ident[EI_OSABI] == bi->brand ||
  169                     strncmp((const char *)&hdr->e_ident[OLD_EI_BRAND],
  170                     bi->compat_3_brand, strlen(bi->compat_3_brand)) == 0))
  171                         return (bi);
  172         }
  173 
  174         /* Lacking a known brand, search for a recognized interpreter. */
  175         if (interp != NULL) {
  176                 for (i = 0; i < MAX_BRANDS; i++) {
  177                         bi = elf_brand_list[i];
  178                         if (bi != NULL && hdr->e_machine == bi->machine &&
  179                             strcmp(interp, bi->interp_path) == 0)
  180                                 return (bi);
  181                 }
  182         }
  183 
  184         /* Lacking a recognized interpreter, try the default brand */
  185         for (i = 0; i < MAX_BRANDS; i++) {
  186                 bi = elf_brand_list[i];
  187                 if (bi != NULL && hdr->e_machine == bi->machine &&
  188                     __elfN(fallback_brand) == bi->brand)
  189                         return (bi);
  190         }
  191         return (NULL);
  192 }
  193 
  194 static int
  195 __elfN(check_header)(const Elf_Ehdr *hdr)
  196 {
  197         Elf_Brandinfo *bi;
  198         int i;
  199 
  200         if (!IS_ELF(*hdr) ||
  201             hdr->e_ident[EI_CLASS] != ELF_TARG_CLASS ||
  202             hdr->e_ident[EI_DATA] != ELF_TARG_DATA ||
  203             hdr->e_ident[EI_VERSION] != EV_CURRENT)
  204                 return (ENOEXEC);
  205 
  206         /*
  207          * Make sure we have at least one brand for this machine.
  208          */
  209 
  210         for (i = 0; i < MAX_BRANDS; i++) {
  211                 bi = elf_brand_list[i];
  212                 if (bi != NULL && bi->machine == hdr->e_machine)
  213                         break;
  214         }
  215         if (i == MAX_BRANDS)
  216                 return (ENOEXEC);
  217 
  218         if (hdr->e_version != ELF_TARG_VER)
  219                 return (ENOEXEC);
  220 
  221         return (0);
  222 }
  223 
  224 static int
  225 __elfN(map_partial)(vm_map_t map, vm_object_t object, vm_ooffset_t offset,
  226         vm_offset_t start, vm_offset_t end, vm_prot_t prot,
  227         vm_prot_t max)
  228 {
  229         int error, rv;
  230         vm_offset_t off;
  231         vm_offset_t data_buf = 0;
  232 
  233         /*
  234          * Create the page if it doesn't exist yet. Ignore errors.
  235          */
  236         vm_map_lock(map);
  237         vm_map_insert(map, NULL, 0, trunc_page(start), round_page(end), max,
  238             max, 0);
  239         vm_map_unlock(map);
  240 
  241         /*
  242          * Find the page from the underlying object.
  243          */
  244         if (object) {
  245                 vm_object_reference(object);
  246                 rv = vm_map_find(exec_map,
  247                                  object,
  248                                  trunc_page(offset),
  249                                  &data_buf,
  250                                  PAGE_SIZE,
  251                                  TRUE,
  252                                  VM_PROT_READ,
  253                                  VM_PROT_ALL,
  254                                  MAP_COPY_ON_WRITE | MAP_PREFAULT_PARTIAL);
  255                 if (rv != KERN_SUCCESS) {
  256                         vm_object_deallocate(object);
  257                         return (rv);
  258                 }
  259 
  260                 off = offset - trunc_page(offset);
  261                 error = copyout((caddr_t)data_buf + off, (caddr_t)start,
  262                     end - start);
  263                 vm_map_remove(exec_map, data_buf, data_buf + PAGE_SIZE);
  264                 if (error) {
  265                         return (KERN_FAILURE);
  266                 }
  267         }
  268 
  269         return (KERN_SUCCESS);
  270 }
  271 
  272 static int
  273 __elfN(map_insert)(vm_map_t map, vm_object_t object, vm_ooffset_t offset,
  274         vm_offset_t start, vm_offset_t end, vm_prot_t prot,
  275         vm_prot_t max, int cow)
  276 {
  277         int rv;
  278 
  279         if (start != trunc_page(start)) {
  280                 rv = __elfN(map_partial)(map, object, offset, start,
  281                     round_page(start), prot, max);
  282                 if (rv)
  283                         return (rv);
  284                 offset += round_page(start) - start;
  285                 start = round_page(start);
  286         }
  287         if (end != round_page(end)) {
  288                 rv = __elfN(map_partial)(map, object, offset +
  289                     trunc_page(end) - start, trunc_page(end), end, prot, max);
  290                 if (rv)
  291                         return (rv);
  292                 end = trunc_page(end);
  293         }
  294         if (end > start) {
  295                 if (offset & PAGE_MASK) {
  296                         vm_offset_t data_buf, off;
  297                         vm_size_t sz;
  298                         int error;
  299 
  300                         /*
  301                          * The mapping is not page aligned. This means we have
  302                          * to copy the data. Sigh.
  303                          */
  304                         rv = vm_map_find(map, 0, 0, &start, end - start,
  305                             FALSE, prot, max, 0);
  306                         if (rv)
  307                                 return (rv);
  308                         while (start < end) {
  309                                 vm_object_reference(object);
  310                                 rv = vm_map_find(exec_map,
  311                                                  object,
  312                                                  trunc_page(offset),
  313                                                  &data_buf,
  314                                                  2 * PAGE_SIZE,
  315                                                  TRUE,
  316                                                  VM_PROT_READ,
  317                                                  VM_PROT_ALL,
  318                                                  (MAP_COPY_ON_WRITE
  319                                                   | MAP_PREFAULT_PARTIAL));
  320                                 if (rv != KERN_SUCCESS) {
  321                                         vm_object_deallocate(object);
  322                                         return (rv);
  323                                 }
  324                                 off = offset - trunc_page(offset);
  325                                 sz = end - start;
  326                                 if (sz > PAGE_SIZE)
  327                                         sz = PAGE_SIZE;
  328                                 error = copyout((caddr_t)data_buf + off,
  329                                     (caddr_t)start, sz);
  330                                 vm_map_remove(exec_map, data_buf,
  331                                     data_buf + 2 * PAGE_SIZE);
  332                                 if (error) {
  333                                         return (KERN_FAILURE);
  334                                 }
  335                                 start += sz;
  336                         }
  337                         rv = KERN_SUCCESS;
  338                 } else {
  339                         vm_map_lock(map);
  340                         rv = vm_map_insert(map, object, offset, start, end,
  341                             prot, max, cow);
  342                         vm_map_unlock(map);
  343                 }
  344                 return (rv);
  345         } else {
  346                 return (KERN_SUCCESS);
  347         }
  348 }
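       /*
        * What the two helpers above accomplish for an arbitrarily aligned
        * [start, end) range:
        *
        *	start                                              end
        *	  |<- head frag ->|<---- whole pages ---->|<- tail frag ->|
        *	   map_partial()       vm_map_insert()       map_partial()
        *
        * The head and tail fragments share their pages with neighboring
        * mappings, so their bytes are copied in by hand (copyout); only the
        * page-aligned middle can be mapped directly from the object, and
        * even that falls back to a copy loop when the file offset is not
        * page aligned.
        */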
  349 
  350 static int
  351 __elfN(load_section)(struct proc *p, struct vmspace *vmspace,
  352         struct vnode *vp, vm_object_t object, vm_offset_t offset,
  353         caddr_t vmaddr, size_t memsz, size_t filsz, vm_prot_t prot,
  354         size_t pagesize)
  355 {
  356         size_t map_len;
  357         vm_offset_t map_addr;
  358         int error, rv, cow;
  359         size_t copy_len;
  360         vm_offset_t file_addr;
  361         vm_offset_t data_buf = 0;
  362 
  363         GIANT_REQUIRED;
  364 
  365         error = 0;
  366 
  367         /*
  368          * It's necessary to fail if the filsz + offset taken from the
  369          * header is greater than the actual file pager object's size.
  370          * If we were to allow this, then the vm_map_find() below would
  371          * walk right off the end of the file object and into the ether.
  372          *
  373          * While I'm here, might as well check for something else that
  374          * is invalid: filsz cannot be greater than memsz.
  375          */
  376         if ((off_t)filsz + offset > object->un_pager.vnp.vnp_size ||
  377             filsz > memsz) {
  378                 uprintf("elf_load_section: truncated ELF file\n");
  379                 return (ENOEXEC);
  380         }
  381 
  382 #define trunc_page_ps(va, ps)   ((va) & ~(ps - 1))
  383 #define round_page_ps(va, ps)   (((va) + (ps - 1)) & ~(ps - 1))
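       /*
        * Both macros assume ps is a power of two.  For example, with
        * ps = 0x1000:
        *
        *	trunc_page_ps(0x12345, 0x1000) == 0x12000
        *	round_page_ps(0x12345, 0x1000) == 0x13000
        */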
  384 
  385         map_addr = trunc_page_ps((vm_offset_t)vmaddr, pagesize);
  386         file_addr = trunc_page_ps(offset, pagesize);
  387 
  388         /*
  389          * We have two choices.  We can either clear the data in the last page
  390          * of an oversized mapping, or we can start the anon mapping a page
  391          * early and copy the initialized data into that first page.  We
   392          * choose the second.
  393          */
  394         if (memsz > filsz)
  395                 map_len = trunc_page_ps(offset + filsz, pagesize) - file_addr;
  396         else
  397                 map_len = round_page_ps(offset + filsz, pagesize) - file_addr;
  398 
  399         if (map_len != 0) {
  400                 vm_object_reference(object);
  401 
  402                 /* cow flags: don't dump readonly sections in core */
  403                 cow = MAP_COPY_ON_WRITE | MAP_PREFAULT |
  404                     (prot & VM_PROT_WRITE ? 0 : MAP_DISABLE_COREDUMP);
  405 
  406                 rv = __elfN(map_insert)(&vmspace->vm_map,
  407                                       object,
  408                                       file_addr,        /* file offset */
  409                                       map_addr,         /* virtual start */
  410                                       map_addr + map_len,/* virtual end */
  411                                       prot,
  412                                       VM_PROT_ALL,
  413                                       cow);
  414                 if (rv != KERN_SUCCESS) {
  415                         vm_object_deallocate(object);
  416                         return (EINVAL);
  417                 }
  418 
  419                 /* we can stop now if we've covered it all */
  420                 if (memsz == filsz) {
  421                         return (0);
  422                 }
  423         }
  424 
  425 
  426         /*
  427          * We have to get the remaining bit of the file into the first part
  428          * of the oversized map segment.  This is normally because the .data
  429          * segment in the file is extended to provide bss.  It's a neat idea
  430          * to try and save a page, but it's a pain in the behind to implement.
  431          */
  432         copy_len = (offset + filsz) - trunc_page_ps(offset + filsz, pagesize);
  433         map_addr = trunc_page_ps((vm_offset_t)vmaddr + filsz, pagesize);
  434         map_len = round_page_ps((vm_offset_t)vmaddr + memsz, pagesize) -
  435             map_addr;
  436 
  437         /* This had damn well better be true! */
  438         if (map_len != 0) {
  439                 rv = __elfN(map_insert)(&vmspace->vm_map, NULL, 0, map_addr,
  440                     map_addr + map_len, VM_PROT_ALL, VM_PROT_ALL, 0);
  441                 if (rv != KERN_SUCCESS) {
  442                         return (EINVAL);
  443                 }
  444         }
  445 
  446         if (copy_len != 0) {
  447                 vm_offset_t off;
  448                 vm_object_reference(object);
  449                 rv = vm_map_find(exec_map,
  450                                  object,
  451                                  trunc_page(offset + filsz),
  452                                  &data_buf,
  453                                  PAGE_SIZE,
  454                                  TRUE,
  455                                  VM_PROT_READ,
  456                                  VM_PROT_ALL,
  457                                  MAP_COPY_ON_WRITE | MAP_PREFAULT_PARTIAL);
  458                 if (rv != KERN_SUCCESS) {
  459                         vm_object_deallocate(object);
  460                         return (EINVAL);
  461                 }
  462 
  463                 /* send the page fragment to user space */
  464                 off = trunc_page_ps(offset + filsz, pagesize) -
  465                     trunc_page(offset + filsz);
  466                 error = copyout((caddr_t)data_buf + off, (caddr_t)map_addr,
  467                     copy_len);
  468                 vm_map_remove(exec_map, data_buf, data_buf + PAGE_SIZE);
  469                 if (error) {
  470                         return (error);
  471                 }
  472         }
  473 
  474         /*
  475          * set it to the specified protection.
  476          * XXX had better undo the damage from pasting over the cracks here!
  477          */
  478         vm_map_protect(&vmspace->vm_map, trunc_page(map_addr),
  479             round_page(map_addr + map_len),  prot, FALSE);
  480 
  481         return (error);
  482 }
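       /*
        * Worked example for the routine above, assuming a 0x1000-byte page
        * size and a (hypothetical) PT_LOAD header with offset 0x2000,
        * filsz 0x1800, memsz 0x3000, vmaddr 0x8049000:
        *
        *   -	memsz > filsz, so map_len = 0x3000 - 0x2000 = 0x1000: one
        *	full page is mapped copy-on-write straight from the file.
        *   -	copy_len = 0x3800 - 0x3000 = 0x800, map_addr = 0x804a000,
        *	map_len = 0x804c000 - 0x804a000 = 0x2000: two anonymous
        *	zero-fill pages cover the rest of the segment.
        *   -	The last 0x800 file bytes are then copyout()ed into the first
        *	anonymous page, giving .data its initialized tail and bss its
        *	zeros without mapping a partial file page.
        */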
  483 
  484 /*
  485  * Load the file "file" into memory.  It may be either a shared object
  486  * or an executable.
  487  *
  488  * The "addr" reference parameter is in/out.  On entry, it specifies
  489  * the address where a shared object should be loaded.  If the file is
  490  * an executable, this value is ignored.  On exit, "addr" specifies
  491  * where the file was actually loaded.
  492  *
  493  * The "entry" reference parameter is out only.  On exit, it specifies
  494  * the entry point for the loaded file.
  495  */
  496 static int
  497 __elfN(load_file)(struct proc *p, const char *file, u_long *addr,
  498         u_long *entry, size_t pagesize)
  499 {
  500         struct {
  501                 struct nameidata nd;
  502                 struct vattr attr;
  503                 struct image_params image_params;
  504         } *tempdata;
  505         const Elf_Ehdr *hdr = NULL;
  506         const Elf_Phdr *phdr = NULL;
  507         struct nameidata *nd;
  508         struct vmspace *vmspace = p->p_vmspace;
  509         struct vattr *attr;
  510         struct image_params *imgp;
  511         vm_prot_t prot;
  512         u_long rbase;
  513         u_long base_addr = 0;
  514         int error, i, numsegs;
  515 
  516         if (curthread->td_proc != p)
  517                 panic("elf_load_file - thread");        /* XXXKSE DIAGNOSTIC */
  518 
  519         tempdata = malloc(sizeof(*tempdata), M_TEMP, M_WAITOK);
  520         nd = &tempdata->nd;
  521         attr = &tempdata->attr;
  522         imgp = &tempdata->image_params;
  523 
  524         /*
  525          * Initialize part of the common data
  526          */
  527         imgp->proc = p;
  528         imgp->userspace_argv = NULL;
  529         imgp->userspace_envv = NULL;
  530         imgp->attr = attr;
  531         imgp->firstpage = NULL;
  532         imgp->image_header = (char *)kmem_alloc_wait(exec_map, PAGE_SIZE);
  533         imgp->object = NULL;
  534         imgp->execlabel = NULL;
  535 
  536         if (imgp->image_header == NULL) {
  537                 nd->ni_vp = NULL;
  538                 error = ENOMEM;
  539                 goto fail;
  540         }
  541 
  542         /* XXXKSE */
  543         NDINIT(nd, LOOKUP, LOCKLEAF|FOLLOW, UIO_SYSSPACE, file, curthread);
  544 
  545         if ((error = namei(nd)) != 0) {
  546                 nd->ni_vp = NULL;
  547                 goto fail;
  548         }
  549         NDFREE(nd, NDF_ONLY_PNBUF);
  550         imgp->vp = nd->ni_vp;
  551 
  552         /*
  553          * Check permissions, modes, uid, etc on the file, and "open" it.
  554          */
  555         error = exec_check_permissions(imgp);
  556         if (error) {
  557                 VOP_UNLOCK(nd->ni_vp, 0, curthread); /* XXXKSE */
  558                 goto fail;
  559         }
  560 
  561         error = exec_map_first_page(imgp);
  562         /*
  563          * Also make certain that the interpreter stays the same, so set
  564          * its VV_TEXT flag, too.
  565          */
  566         if (error == 0)
  567                 nd->ni_vp->v_vflag |= VV_TEXT;
  568 
  569         VOP_GETVOBJECT(nd->ni_vp, &imgp->object);
  570         vm_object_reference(imgp->object);
  571 
  572         VOP_UNLOCK(nd->ni_vp, 0, curthread); /* XXXKSE */
  573         if (error)
  574                 goto fail;
  575 
  576         hdr = (const Elf_Ehdr *)imgp->image_header;
  577         if ((error = __elfN(check_header)(hdr)) != 0)
  578                 goto fail;
  579         if (hdr->e_type == ET_DYN)
  580                 rbase = *addr;
  581         else if (hdr->e_type == ET_EXEC)
  582                 rbase = 0;
  583         else {
  584                 error = ENOEXEC;
  585                 goto fail;
  586         }
  587 
  588         /* Only support headers that fit within first page for now */
  589         if ((hdr->e_phoff > PAGE_SIZE) ||
  590             (hdr->e_phoff + hdr->e_phentsize * hdr->e_phnum) > PAGE_SIZE) {
  591                 error = ENOEXEC;
  592                 goto fail;
  593         }
  594 
  595         phdr = (const Elf_Phdr *)(imgp->image_header + hdr->e_phoff);
  596 
  597         for (i = 0, numsegs = 0; i < hdr->e_phnum; i++) {
  598                 if (phdr[i].p_type == PT_LOAD) {        /* Loadable segment */
  599                         prot = 0;
  600                         if (phdr[i].p_flags & PF_X)
  601                                 prot |= VM_PROT_EXECUTE;
  602                         if (phdr[i].p_flags & PF_W)
  603                                 prot |= VM_PROT_WRITE;
  604                         if (phdr[i].p_flags & PF_R)
  605                                 prot |= VM_PROT_READ;
  606 
  607                         if ((error = __elfN(load_section)(p, vmspace,
  608                             nd->ni_vp, imgp->object, phdr[i].p_offset,
  609                             (caddr_t)(uintptr_t)phdr[i].p_vaddr + rbase,
  610                             phdr[i].p_memsz, phdr[i].p_filesz, prot,
  611                             pagesize)) != 0)
  612                                 goto fail;
  613                         /*
  614                          * Establish the base address if this is the
  615                          * first segment.
  616                          */
  617                         if (numsegs == 0)
  618                                 base_addr = trunc_page(phdr[i].p_vaddr +
  619                                     rbase);
  620                         numsegs++;
  621                 }
  622         }
  623         *addr = base_addr;
  624         *entry = (unsigned long)hdr->e_entry + rbase;
  625 
  626 fail:
  627         if (imgp->firstpage)
  628                 exec_unmap_first_page(imgp);
  629         if (imgp->image_header)
  630                 kmem_free_wakeup(exec_map, (vm_offset_t)imgp->image_header,
  631                     PAGE_SIZE);
  632         if (imgp->object)
  633                 vm_object_deallocate(imgp->object);
  634 
  635         if (nd->ni_vp)
  636                 vrele(nd->ni_vp);
  637 
  638         free(tempdata, M_TEMP);
  639 
  640         return (error);
  641 }
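       /*
        * The image activator below uses __elfN(load_file)() to pull in the
        * PT_INTERP interpreter (normally the runtime linker): "addr" goes in
        * as ELF_RTLD_ADDR(vmspace) and comes back as the base address the
        * interpreter actually landed at, which is what AT_BASE reports.
        */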
  642 
  643 static int
  644 __CONCAT(exec_, __elfN(imgact))(struct image_params *imgp)
  645 {
  646         const Elf_Ehdr *hdr = (const Elf_Ehdr *)imgp->image_header;
  647         const Elf_Phdr *phdr;
  648         Elf_Auxargs *elf_auxargs = NULL;
  649         struct vmspace *vmspace;
  650         vm_prot_t prot;
  651         u_long text_size = 0, data_size = 0, total_size = 0;
  652         u_long text_addr = 0, data_addr = 0;
  653         u_long seg_size, seg_addr;
  654         u_long addr, entry = 0, proghdr = 0;
  655         int error, i;
  656         const char *interp = NULL;
  657         Elf_Brandinfo *brand_info;
  658         char *path;
  659         struct thread *td = curthread;
  660         struct sysentvec *sv;
  661 
  662         GIANT_REQUIRED;
  663 
  664         /*
  665          * Do we have a valid ELF header ?
  666          */
  667         if (__elfN(check_header)(hdr) != 0 || hdr->e_type != ET_EXEC)
  668                 return (-1);
  669 
  670         /*
  671          * From here on down, we return an errno, not -1, as we've
  672          * detected an ELF file.
  673          */
  674 
  675         if ((hdr->e_phoff > PAGE_SIZE) ||
  676             (hdr->e_phoff + hdr->e_phentsize * hdr->e_phnum) > PAGE_SIZE) {
  677                 /* Only support headers in first page for now */
  678                 return (ENOEXEC);
  679         }
  680         phdr = (const Elf_Phdr *)(imgp->image_header + hdr->e_phoff);
  681 
  682         /*
  683          * From this point on, we may have resources that need to be freed.
  684          */
  685 
  686         VOP_UNLOCK(imgp->vp, 0, td);
  687 
  688         for (i = 0; i < hdr->e_phnum; i++) {
  689                 switch (phdr[i].p_type) {
  690                 case PT_INTERP: /* Path to interpreter */
  691                         if (phdr[i].p_filesz > MAXPATHLEN ||
  692                             phdr[i].p_offset + phdr[i].p_filesz > PAGE_SIZE) {
  693                                 error = ENOEXEC;
  694                                 goto fail;
  695                         }
  696                         interp = imgp->image_header + phdr[i].p_offset;
  697                         break;
  698                 default:
  699                         break;
  700                 }
  701         }
  702 
  703         brand_info = __elfN(get_brandinfo)(hdr, interp);
  704         if (brand_info == NULL) {
  705                 uprintf("ELF binary type \"%u\" not known.\n",
  706                     hdr->e_ident[EI_OSABI]);
  707                 error = ENOEXEC;
  708                 goto fail;
  709         }
  710         sv = brand_info->sysvec;
  711 
  712         if ((error = exec_extract_strings(imgp)) != 0)
  713                 goto fail;
  714 
  715         exec_new_vmspace(imgp, sv);
  716 
  717         vmspace = imgp->proc->p_vmspace;
  718 
  719         for (i = 0; i < hdr->e_phnum; i++) {
  720                 switch (phdr[i].p_type) {
  721                 case PT_LOAD:   /* Loadable segment */
  722                         prot = 0;
  723                         if (phdr[i].p_flags & PF_X)
  724                                 prot |= VM_PROT_EXECUTE;
  725                         if (phdr[i].p_flags & PF_W)
  726                                 prot |= VM_PROT_WRITE;
  727                         if (phdr[i].p_flags & PF_R)
  728                                 prot |= VM_PROT_READ;
  729 
  730 #if defined(__ia64__) && __ELF_WORD_SIZE == 32 && defined(IA32_ME_HARDER)
  731                         /*
  732                          * Some x86 binaries assume read == executable,
  733                          * notably the M3 runtime and therefore cvsup
  734                          */
  735                         if (prot & VM_PROT_READ)
  736                                 prot |= VM_PROT_EXECUTE;
  737 #endif
  738 
  739                         if ((error = __elfN(load_section)(imgp->proc, vmspace,
  740                             imgp->vp, imgp->object, phdr[i].p_offset,
  741                             (caddr_t)(uintptr_t)phdr[i].p_vaddr,
  742                             phdr[i].p_memsz, phdr[i].p_filesz, prot,
  743                             sv->sv_pagesize)) != 0)
  744                                 goto fail;
  745 
  746                         seg_addr = trunc_page(phdr[i].p_vaddr);
  747                         seg_size = round_page(phdr[i].p_memsz +
  748                             phdr[i].p_vaddr - seg_addr);
  749 
  750                         /*
  751                          * Is this .text or .data?  We can't use
   752                          * VM_PROT_WRITE or VM_PROT_EXEC; it breaks the
  753                          * alpha terribly and possibly does other bad
  754                          * things so we stick to the old way of figuring
  755                          * it out:  If the segment contains the program
  756                          * entry point, it's a text segment, otherwise it
  757                          * is a data segment.
  758                          *
  759                          * Note that obreak() assumes that data_addr + 
  760                          * data_size == end of data load area, and the ELF
  761                          * file format expects segments to be sorted by
  762                          * address.  If multiple data segments exist, the
  763                          * last one will be used.
  764                          */
  765                         if (hdr->e_entry >= phdr[i].p_vaddr &&
  766                             hdr->e_entry < (phdr[i].p_vaddr +
  767                             phdr[i].p_memsz)) {
  768                                 text_size = seg_size;
  769                                 text_addr = seg_addr;
  770                                 entry = (u_long)hdr->e_entry;
  771                         } else {
  772                                 data_size = seg_size;
  773                                 data_addr = seg_addr;
  774                         }
  775                         total_size += seg_size;
  776                         break;
  777                 case PT_PHDR:   /* Program header table info */
  778                         proghdr = phdr[i].p_vaddr;
  779                         break;
  780                 default:
  781                         break;
  782                 }
  783         }
  784         
  785         if (data_addr == 0 && data_size == 0) {
  786                 data_addr = text_addr;
  787                 data_size = text_size;
  788         }
  789 
  790         /*
  791          * Check limits.  It should be safe to check the
  792          * limits after loading the segments since we do
  793          * not actually fault in all the segments pages.
  794          */
  795         if (data_size >
  796             imgp->proc->p_rlimit[RLIMIT_DATA].rlim_cur ||
  797             text_size > maxtsiz ||
  798             total_size >
  799             imgp->proc->p_rlimit[RLIMIT_VMEM].rlim_cur) {
  800                 error = ENOMEM;
  801                 goto fail;
  802         }
  803 
  804         vmspace->vm_tsize = text_size >> PAGE_SHIFT;
  805         vmspace->vm_taddr = (caddr_t)(uintptr_t)text_addr;
  806         vmspace->vm_dsize = data_size >> PAGE_SHIFT;
  807         vmspace->vm_daddr = (caddr_t)(uintptr_t)data_addr;
  808 
  809         addr = ELF_RTLD_ADDR(vmspace);
  810 
  811         imgp->entry_addr = entry;
  812 
  813         imgp->proc->p_sysent = sv;
  814         if (interp != NULL) {
  815                 path = malloc(MAXPATHLEN, M_TEMP, M_WAITOK);
  816                 snprintf(path, MAXPATHLEN, "%s%s", brand_info->emul_path,
  817                     interp);
  818                 if ((error = __elfN(load_file)(imgp->proc, path, &addr,
  819                     &imgp->entry_addr, sv->sv_pagesize)) != 0) {
  820                         if ((error = __elfN(load_file)(imgp->proc, interp,
  821                             &addr, &imgp->entry_addr, sv->sv_pagesize)) != 0) {
  822                                 uprintf("ELF interpreter %s not found\n",
  823                                     path);
  824                                 free(path, M_TEMP);
  825                                 goto fail;
  826                         }
  827                 }
  828                 free(path, M_TEMP);
  829         }
  830 
  831         /*
  832          * Construct auxargs table (used by the fixup routine)
  833          */
  834         elf_auxargs = malloc(sizeof(Elf_Auxargs), M_TEMP, M_WAITOK);
  835         elf_auxargs->execfd = -1;
  836         elf_auxargs->phdr = proghdr;
  837         elf_auxargs->phent = hdr->e_phentsize;
  838         elf_auxargs->phnum = hdr->e_phnum;
  839         elf_auxargs->pagesz = PAGE_SIZE;
  840         elf_auxargs->base = addr;
  841         elf_auxargs->flags = 0;
  842         elf_auxargs->entry = entry;
  843         elf_auxargs->trace = elf_trace;
  844 
  845         imgp->auxargs = elf_auxargs;
  846         imgp->interpreted = 0;
  847 
  848 fail:
  849         vn_lock(imgp->vp, LK_EXCLUSIVE | LK_RETRY, td);
  850         return (error);
  851 }
  852 
  853 #define suword __CONCAT(suword, __ELF_WORD_SIZE)
  854 
  855 int
  856 __elfN(freebsd_fixup)(register_t **stack_base, struct image_params *imgp)
  857 {
  858         Elf_Auxargs *args = (Elf_Auxargs *)imgp->auxargs;
  859         Elf_Addr *base;
  860         Elf_Addr *pos;
  861 
  862         base = (Elf_Addr *)*stack_base;
  863         pos = base + (imgp->argc + imgp->envc + 2);
  864 
  865         if (args->trace) {
  866                 AUXARGS_ENTRY(pos, AT_DEBUG, 1);
  867         }
  868         if (args->execfd != -1) {
  869                 AUXARGS_ENTRY(pos, AT_EXECFD, args->execfd);
  870         }
  871         AUXARGS_ENTRY(pos, AT_PHDR, args->phdr);
  872         AUXARGS_ENTRY(pos, AT_PHENT, args->phent);
  873         AUXARGS_ENTRY(pos, AT_PHNUM, args->phnum);
  874         AUXARGS_ENTRY(pos, AT_PAGESZ, args->pagesz);
  875         AUXARGS_ENTRY(pos, AT_FLAGS, args->flags);
  876         AUXARGS_ENTRY(pos, AT_ENTRY, args->entry);
  877         AUXARGS_ENTRY(pos, AT_BASE, args->base);
  878         AUXARGS_ENTRY(pos, AT_NULL, 0);
  879 
  880         free(imgp->auxargs, M_TEMP);
  881         imgp->auxargs = NULL;
  882 
  883         base--;
  884         suword(base, (long)imgp->argc);
  885         *stack_base = (register_t *)base;
  886         return (0);
  887 }
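       /*
        * Resulting layout at the new *stack_base, one Elf_Addr per slot:
        *
        *	argc
        *	argv[0] ... argv[argc-1], NULL
        *	envp[0] ... envp[envc-1], NULL
        *	AUXARGS_ENTRY() pairs (AT_PHDR ... AT_BASE), AT_NULL
        *
        * The aux vector therefore begins argc + envc + 2 slots past the old
        * stack_base, and argc is stored one slot below the old stack_base,
        * which becomes the new one.
        */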
  888 
  889 /*
  890  * Code for generating ELF core dumps.
  891  */
  892 
  893 typedef void (*segment_callback)(vm_map_entry_t, void *);
  894 
  895 /* Closure for cb_put_phdr(). */
  896 struct phdr_closure {
  897         Elf_Phdr *phdr;         /* Program header to fill in */
  898         Elf_Off offset;         /* Offset of segment in core file */
  899 };
  900 
  901 /* Closure for cb_size_segment(). */
  902 struct sseg_closure {
  903         int count;              /* Count of writable segments. */
  904         size_t size;            /* Total size of all writable segments. */
  905 };
  906 
  907 static void cb_put_phdr(vm_map_entry_t, void *);
  908 static void cb_size_segment(vm_map_entry_t, void *);
  909 static void each_writable_segment(struct proc *, segment_callback, void *);
  910 static int __elfN(corehdr)(struct thread *, struct vnode *, struct ucred *,
  911     int, void *, size_t);
  912 static void __elfN(puthdr)(struct proc *, void *, size_t *,
  913     const prstatus_t *, const prfpregset_t *, const prpsinfo_t *, int);
  914 static void __elfN(putnote)(void *, size_t *, const char *, int,
  915     const void *, size_t);
  916 
  917 extern int osreldate;
  918 
  919 int
  920 __elfN(coredump)(td, vp, limit)
  921         struct thread *td;
  922         register struct vnode *vp;
  923         off_t limit;
  924 {
  925         register struct proc *p = td->td_proc;
  926         register struct ucred *cred = td->td_ucred;
  927         int error = 0;
  928         struct sseg_closure seginfo;
  929         void *hdr;
  930         size_t hdrsize;
  931 
  932         /* Size the program segments. */
  933         seginfo.count = 0;
  934         seginfo.size = 0;
  935         each_writable_segment(p, cb_size_segment, &seginfo);
  936 
  937         /*
  938          * Calculate the size of the core file header area by making
  939          * a dry run of generating it.  Nothing is written, but the
  940          * size is calculated.
  941          */
  942         hdrsize = 0;
  943         __elfN(puthdr)((struct proc *)NULL, (void *)NULL, &hdrsize,
  944             (const prstatus_t *)NULL, (const prfpregset_t *)NULL,
  945             (const prpsinfo_t *)NULL, seginfo.count);
  946 
  947         if (hdrsize + seginfo.size >= limit)
  948                 return (EFAULT);
  949 
  950         /*
  951          * Allocate memory for building the header, fill it up,
  952          * and write it out.
  953          */
  954         hdr = malloc(hdrsize, M_TEMP, M_WAITOK);
  955         if (hdr == NULL) {
  956                 return (EINVAL);
  957         }
  958         error = __elfN(corehdr)(td, vp, cred, seginfo.count, hdr, hdrsize);
  959 
  960         /* Write the contents of all of the writable segments. */
  961         if (error == 0) {
  962                 Elf_Phdr *php;
  963                 off_t offset;
  964                 int i;
  965 
  966                 php = (Elf_Phdr *)((char *)hdr + sizeof(Elf_Ehdr)) + 1;
  967                 offset = hdrsize;
  968                 for (i = 0; i < seginfo.count; i++) {
  969                         error = vn_rdwr_inchunks(UIO_WRITE, vp,
  970                             (caddr_t)(uintptr_t)php->p_vaddr,
  971                             php->p_filesz, offset, UIO_USERSPACE,
  972                             IO_UNIT | IO_DIRECT, cred, NOCRED, (int *)NULL,
  973                             curthread); /* XXXKSE */
  974                         if (error != 0)
  975                                 break;
  976                         offset += php->p_filesz;
  977                         php++;
  978                 }
  979         }
  980         free(hdr, M_TEMP);
  981 
  982         return (error);
  983 }
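       /*
        * Shape of the resulting core file, per __elfN(puthdr)() below and
        * the segment loop above:
        *
        *	Elf_Ehdr
        *	Elf_Phdr[numsegs + 1]	(PT_NOTE first, one PT_LOAD per segment)
        *	note data		(NT_PRSTATUS, NT_FPREGSET, NT_PRPSINFO)
        *	...padding to a page boundary...
        *	contents of each writable segment, in map order
        */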
  984 
  985 /*
  986  * A callback for each_writable_segment() to write out the segment's
  987  * program header entry.
  988  */
  989 static void
  990 cb_put_phdr(entry, closure)
  991         vm_map_entry_t entry;
  992         void *closure;
  993 {
  994         struct phdr_closure *phc = (struct phdr_closure *)closure;
  995         Elf_Phdr *phdr = phc->phdr;
  996 
  997         phc->offset = round_page(phc->offset);
  998 
  999         phdr->p_type = PT_LOAD;
 1000         phdr->p_offset = phc->offset;
 1001         phdr->p_vaddr = entry->start;
 1002         phdr->p_paddr = 0;
 1003         phdr->p_filesz = phdr->p_memsz = entry->end - entry->start;
 1004         phdr->p_align = PAGE_SIZE;
 1005         phdr->p_flags = 0;
 1006         if (entry->protection & VM_PROT_READ)
 1007                 phdr->p_flags |= PF_R;
 1008         if (entry->protection & VM_PROT_WRITE)
 1009                 phdr->p_flags |= PF_W;
 1010         if (entry->protection & VM_PROT_EXECUTE)
 1011                 phdr->p_flags |= PF_X;
 1012 
 1013         phc->offset += phdr->p_filesz;
 1014         phc->phdr++;
 1015 }
 1016 
 1017 /*
 1018  * A callback for each_writable_segment() to gather information about
 1019  * the number of segments and their total size.
 1020  */
 1021 static void
 1022 cb_size_segment(entry, closure)
 1023         vm_map_entry_t entry;
 1024         void *closure;
 1025 {
 1026         struct sseg_closure *ssc = (struct sseg_closure *)closure;
 1027 
 1028         ssc->count++;
 1029         ssc->size += entry->end - entry->start;
 1030 }
 1031 
 1032 /*
 1033  * For each writable segment in the process's memory map, call the given
 1034  * function with a pointer to the map entry and some arbitrary
 1035  * caller-supplied data.
 1036  */
 1037 static void
 1038 each_writable_segment(p, func, closure)
 1039         struct proc *p;
 1040         segment_callback func;
 1041         void *closure;
 1042 {
 1043         vm_map_t map = &p->p_vmspace->vm_map;
 1044         vm_map_entry_t entry;
 1045 
 1046         for (entry = map->header.next; entry != &map->header;
 1047             entry = entry->next) {
 1048                 vm_object_t obj;
 1049 
 1050                 /*
 1051                  * Don't dump inaccessible mappings, deal with legacy
 1052                  * coredump mode.
 1053                  *
 1054                  * Note that read-only segments related to the elf binary
 1055                  * are marked MAP_ENTRY_NOCOREDUMP now so we no longer
 1056                  * need to arbitrarily ignore such segments.
 1057                  */
 1058                 if (elf_legacy_coredump) {
 1059                         if ((entry->protection & VM_PROT_RW) != VM_PROT_RW)
 1060                                 continue;
 1061                 } else {
 1062                         if ((entry->protection & VM_PROT_ALL) == 0)
 1063                                 continue;
 1064                 }
 1065 
 1066                 /*
  1067                  * Don't include a memory segment in the coredump if
 1068                  * MAP_NOCORE is set in mmap(2) or MADV_NOCORE in
 1069                  * madvise(2).  Do not dump submaps (i.e. parts of the
 1070                  * kernel map).
 1071                  */
 1072                 if (entry->eflags & (MAP_ENTRY_NOCOREDUMP|MAP_ENTRY_IS_SUB_MAP))
 1073                         continue;
 1074 
 1075                 if ((obj = entry->object.vm_object) == NULL)
 1076                         continue;
 1077 
 1078                 /* Find the deepest backing object. */
 1079                 while (obj->backing_object != NULL)
 1080                         obj = obj->backing_object;
 1081 
 1082                 /* Ignore memory-mapped devices and such things. */
 1083                 if (obj->type != OBJT_DEFAULT &&
 1084                     obj->type != OBJT_SWAP &&
 1085                     obj->type != OBJT_VNODE)
 1086                         continue;
 1087 
 1088                 (*func)(entry, closure);
 1089         }
 1090 }
 1091 
 1092 /*
 1093  * Write the core file header to the file, including padding up to
 1094  * the page boundary.
 1095  */
 1096 static int
 1097 __elfN(corehdr)(td, vp, cred, numsegs, hdr, hdrsize)
 1098         struct thread *td;
 1099         struct vnode *vp;
 1100         struct ucred *cred;
 1101         int numsegs;
 1102         size_t hdrsize;
 1103         void *hdr;
 1104 {
 1105         struct {
 1106                 prstatus_t status;
 1107                 prfpregset_t fpregset;
 1108                 prpsinfo_t psinfo;
 1109         } *tempdata;
 1110         struct proc *p = td->td_proc;
 1111         size_t off;
 1112         prstatus_t *status;
 1113         prfpregset_t *fpregset;
 1114         prpsinfo_t *psinfo;
 1115 
 1116         tempdata = malloc(sizeof(*tempdata), M_TEMP, M_ZERO | M_WAITOK);
 1117         status = &tempdata->status;
 1118         fpregset = &tempdata->fpregset;
 1119         psinfo = &tempdata->psinfo;
 1120 
 1121         /* Gather the information for the header. */
 1122         status->pr_version = PRSTATUS_VERSION;
 1123         status->pr_statussz = sizeof(prstatus_t);
 1124         status->pr_gregsetsz = sizeof(gregset_t);
 1125         status->pr_fpregsetsz = sizeof(fpregset_t);
 1126         status->pr_osreldate = osreldate;
 1127         status->pr_cursig = p->p_sig;
 1128         status->pr_pid = p->p_pid;
 1129         fill_regs(td, &status->pr_reg);
 1130 
 1131         fill_fpregs(td, fpregset);
 1132 
 1133         psinfo->pr_version = PRPSINFO_VERSION;
 1134         psinfo->pr_psinfosz = sizeof(prpsinfo_t);
 1135         strlcpy(psinfo->pr_fname, p->p_comm, sizeof(psinfo->pr_fname));
 1136 
 1137         /* XXX - We don't fill in the command line arguments properly yet. */
 1138         strlcpy(psinfo->pr_psargs, p->p_comm, sizeof(psinfo->pr_psargs));
 1139 
 1140         /* Fill in the header. */
 1141         bzero(hdr, hdrsize);
 1142         off = 0;
 1143         __elfN(puthdr)(p, hdr, &off, status, fpregset, psinfo, numsegs);
 1144 
 1145         free(tempdata, M_TEMP);
 1146 
 1147         /* Write it to the core file. */
 1148         return (vn_rdwr_inchunks(UIO_WRITE, vp, hdr, hdrsize, (off_t)0,
 1149             UIO_SYSSPACE, IO_UNIT | IO_DIRECT, cred, NOCRED, NULL,
 1150             td)); /* XXXKSE */
 1151 }
 1152 
 1153 static void
 1154 __elfN(puthdr)(struct proc *p, void *dst, size_t *off, const prstatus_t *status,
 1155     const prfpregset_t *fpregset, const prpsinfo_t *psinfo, int numsegs)
 1156 {
 1157         size_t ehoff;
 1158         size_t phoff;
 1159         size_t noteoff;
 1160         size_t notesz;
 1161 
 1162         ehoff = *off;
 1163         *off += sizeof(Elf_Ehdr);
 1164 
 1165         phoff = *off;
 1166         *off += (numsegs + 1) * sizeof(Elf_Phdr);
 1167 
 1168         noteoff = *off;
 1169         __elfN(putnote)(dst, off, "FreeBSD", NT_PRSTATUS, status,
 1170             sizeof *status);
 1171         __elfN(putnote)(dst, off, "FreeBSD", NT_FPREGSET, fpregset,
 1172             sizeof *fpregset);
 1173         __elfN(putnote)(dst, off, "FreeBSD", NT_PRPSINFO, psinfo,
 1174             sizeof *psinfo);
 1175         notesz = *off - noteoff;
 1176 
 1177         /* Align up to a page boundary for the program segments. */
 1178         *off = round_page(*off);
 1179 
 1180         if (dst != NULL) {
 1181                 Elf_Ehdr *ehdr;
 1182                 Elf_Phdr *phdr;
 1183                 struct phdr_closure phc;
 1184 
 1185                 /*
 1186                  * Fill in the ELF header.
 1187                  */
 1188                 ehdr = (Elf_Ehdr *)((char *)dst + ehoff);
 1189                 ehdr->e_ident[EI_MAG0] = ELFMAG0;
 1190                 ehdr->e_ident[EI_MAG1] = ELFMAG1;
 1191                 ehdr->e_ident[EI_MAG2] = ELFMAG2;
 1192                 ehdr->e_ident[EI_MAG3] = ELFMAG3;
 1193                 ehdr->e_ident[EI_CLASS] = ELF_CLASS;
 1194                 ehdr->e_ident[EI_DATA] = ELF_DATA;
 1195                 ehdr->e_ident[EI_VERSION] = EV_CURRENT;
 1196                 ehdr->e_ident[EI_OSABI] = ELFOSABI_FREEBSD;
 1197                 ehdr->e_ident[EI_ABIVERSION] = 0;
 1198                 ehdr->e_ident[EI_PAD] = 0;
 1199                 ehdr->e_type = ET_CORE;
 1200                 ehdr->e_machine = ELF_ARCH;
 1201                 ehdr->e_version = EV_CURRENT;
 1202                 ehdr->e_entry = 0;
 1203                 ehdr->e_phoff = phoff;
 1204                 ehdr->e_flags = 0;
 1205                 ehdr->e_ehsize = sizeof(Elf_Ehdr);
 1206                 ehdr->e_phentsize = sizeof(Elf_Phdr);
 1207                 ehdr->e_phnum = numsegs + 1;
 1208                 ehdr->e_shentsize = sizeof(Elf_Shdr);
 1209                 ehdr->e_shnum = 0;
 1210                 ehdr->e_shstrndx = SHN_UNDEF;
 1211 
 1212                 /*
 1213                  * Fill in the program header entries.
 1214                  */
 1215                 phdr = (Elf_Phdr *)((char *)dst + phoff);
 1216 
  1217                 /* The note segment. */
 1218                 phdr->p_type = PT_NOTE;
 1219                 phdr->p_offset = noteoff;
 1220                 phdr->p_vaddr = 0;
 1221                 phdr->p_paddr = 0;
 1222                 phdr->p_filesz = notesz;
 1223                 phdr->p_memsz = 0;
 1224                 phdr->p_flags = 0;
 1225                 phdr->p_align = 0;
 1226                 phdr++;
 1227 
 1228                 /* All the writable segments from the program. */
 1229                 phc.phdr = phdr;
 1230                 phc.offset = *off;
 1231                 each_writable_segment(p, cb_put_phdr, &phc);
 1232         }
 1233 }
 1234 
 1235 static void
 1236 __elfN(putnote)(void *dst, size_t *off, const char *name, int type,
 1237     const void *desc, size_t descsz)
 1238 {
 1239         Elf_Note note;
 1240 
 1241         note.n_namesz = strlen(name) + 1;
 1242         note.n_descsz = descsz;
 1243         note.n_type = type;
 1244         if (dst != NULL)
 1245                 bcopy(&note, (char *)dst + *off, sizeof note);
 1246         *off += sizeof note;
 1247         if (dst != NULL)
 1248                 bcopy(name, (char *)dst + *off, note.n_namesz);
 1249         *off += roundup2(note.n_namesz, sizeof(Elf_Size));
 1250         if (dst != NULL)
 1251                 bcopy(desc, (char *)dst + *off, note.n_descsz);
 1252         *off += roundup2(note.n_descsz, sizeof(Elf_Size));
 1253 }
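       /*
        * Example: the NT_PRSTATUS note emitted by __elfN(puthdr)() above is
        * laid out as
        *
        *	Elf_Note { n_namesz = 8, n_descsz = sizeof(prstatus_t),
        *		   n_type = NT_PRSTATUS }
        *	"FreeBSD\0"		(name, padded to an Elf_Size multiple)
        *	prstatus_t payload	(likewise padded)
        */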
 1254 
 1255 /*
 1256  * Tell kern_execve.c about it, with a little help from the linker.
 1257  */
 1258 static struct execsw __elfN(execsw) = {
 1259         __CONCAT(exec_, __elfN(imgact)),
 1260         __XSTRING(__CONCAT(ELF, __ELF_WORD_SIZE))
 1261 };
 1262 EXEC_SET(__CONCAT(elf, __ELF_WORD_SIZE), __elfN(execsw));
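       /*
        * EXEC_SET() (sys/imgact.h) generates the module glue that registers
        * this execsw with the kernel's image-activator list (via
        * exec_register()/exec_unregister() in this era), so kern_execve()
        * offers each new image to the entry point above in turn.
        */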





This page is part of the FreeBSD/Linux Kernel Cross-Reference, and was automatically generated using a modified version of the LXR engine.