FreeBSD/Linux Kernel Cross Reference
sys/kern/imgact_elf.c


    1 /*-
    2  * Copyright (c) 2000 David O'Brien
    3  * Copyright (c) 1995-1996 Søren Schmidt
    4  * Copyright (c) 1996 Peter Wemm
    5  * All rights reserved.
    6  *
    7  * Redistribution and use in source and binary forms, with or without
    8  * modification, are permitted provided that the following conditions
    9  * are met:
   10  * 1. Redistributions of source code must retain the above copyright
   11  *    notice, this list of conditions and the following disclaimer
   12  *    in this position and unchanged.
   13  * 2. Redistributions in binary form must reproduce the above copyright
   14  *    notice, this list of conditions and the following disclaimer in the
   15  *    documentation and/or other materials provided with the distribution.
   16  * 3. The name of the author may not be used to endorse or promote products
   17  *    derived from this software without specific prior written permission
   18  *
   19  * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
   20  * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
   21  * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
   22  * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
   23  * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
   24  * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
   25  * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
   26  * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
   27  * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
   28  * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
   29  */
   30 
   31 #include <sys/cdefs.h>
   32 __FBSDID("$FreeBSD: releng/6.1/sys/kern/imgact_elf.c 158179 2006-04-30 16:44:43Z cvs2svn $");
   33 
   34 #include "opt_compat.h"
   35 
   36 #include <sys/param.h>
   37 #include <sys/exec.h>
   38 #include <sys/fcntl.h>
   39 #include <sys/imgact.h>
   40 #include <sys/imgact_elf.h>
   41 #include <sys/kernel.h>
   42 #include <sys/lock.h>
   43 #include <sys/malloc.h>
   44 #include <sys/mount.h>
   45 #include <sys/mutex.h>
   46 #include <sys/mman.h>
   47 #include <sys/namei.h>
   48 #include <sys/pioctl.h>
   49 #include <sys/proc.h>
   50 #include <sys/procfs.h>
   51 #include <sys/resourcevar.h>
   52 #include <sys/sf_buf.h>
   53 #include <sys/systm.h>
   54 #include <sys/signalvar.h>
   55 #include <sys/stat.h>
   56 #include <sys/sx.h>
   57 #include <sys/syscall.h>
   58 #include <sys/sysctl.h>
   59 #include <sys/sysent.h>
   60 #include <sys/vnode.h>
   61 
   62 #include <vm/vm.h>
   63 #include <vm/vm_kern.h>
   64 #include <vm/vm_param.h>
   65 #include <vm/pmap.h>
   66 #include <vm/vm_map.h>
   67 #include <vm/vm_object.h>
   68 #include <vm/vm_extern.h>
   69 
   70 #include <machine/elf.h>
   71 #include <machine/md_var.h>
   72 
   73 #if defined(COMPAT_IA32) && __ELF_WORD_SIZE == 32
   74 #include <machine/fpu.h>
   75 #include <compat/ia32/ia32_reg.h>
   76 #endif
   77 
   78 #define OLD_EI_BRAND    8
   79 
   80 static int __elfN(check_header)(const Elf_Ehdr *hdr);
   81 static Elf_Brandinfo *__elfN(get_brandinfo)(const Elf_Ehdr *hdr,
   82     const char *interp);
   83 static int __elfN(load_file)(struct proc *p, const char *file, u_long *addr,
   84     u_long *entry, size_t pagesize);
   85 static int __elfN(load_section)(struct vmspace *vmspace, vm_object_t object,
   86     vm_offset_t offset, caddr_t vmaddr, size_t memsz, size_t filsz,
   87     vm_prot_t prot, size_t pagesize);
   88 static int __CONCAT(exec_, __elfN(imgact))(struct image_params *imgp);
   89 
   90 SYSCTL_NODE(_kern, OID_AUTO, __CONCAT(elf, __ELF_WORD_SIZE), CTLFLAG_RW, 0,
   91     "");
   92 
   93 int __elfN(fallback_brand) = -1;
   94 SYSCTL_INT(__CONCAT(_kern_elf, __ELF_WORD_SIZE), OID_AUTO,
   95     fallback_brand, CTLFLAG_RW, &__elfN(fallback_brand), 0,
   96     __XSTRING(__CONCAT(ELF, __ELF_WORD_SIZE)) " brand of last resort");
   97 TUNABLE_INT("kern.elf" __XSTRING(__ELF_WORD_SIZE) ".fallback_brand",
   98     &__elfN(fallback_brand));
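      /*
       * Usage note (editor's sketch): with __ELF_WORD_SIZE == 64 this
       * becomes the "kern.elf64.fallback_brand" sysctl and loader tunable
       * (kern.elf32.fallback_brand for the 32-bit activator).  For example,
       * to treat unbranded binaries as native FreeBSD ones:
       *
       *      sysctl kern.elf64.fallback_brand=9      # 9 == ELFOSABI_FREEBSD
       *
       * or set the same value in /boot/loader.conf.
       */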
   99 
  100 static int elf_trace = 0;
  101 SYSCTL_INT(_debug, OID_AUTO, __elfN(trace), CTLFLAG_RW, &elf_trace, 0, "");
  102 
  103 static int elf_legacy_coredump = 0;
  104 SYSCTL_INT(_debug, OID_AUTO, __elfN(legacy_coredump), CTLFLAG_RW, 
  105     &elf_legacy_coredump, 0, "");
  106 
  107 static Elf_Brandinfo *elf_brand_list[MAX_BRANDS];
  108 
  109 int
  110 __elfN(insert_brand_entry)(Elf_Brandinfo *entry)
  111 {
  112         int i;
  113 
  114         for (i = 0; i < MAX_BRANDS; i++) {
  115                 if (elf_brand_list[i] == NULL) {
  116                         elf_brand_list[i] = entry;
  117                         break;
  118                 }
  119         }
  120         if (i == MAX_BRANDS)
  121                 return (-1);
  122         return (0);
  123 }
  124 
  125 int
  126 __elfN(remove_brand_entry)(Elf_Brandinfo *entry)
  127 {
  128         int i;
  129 
  130         for (i = 0; i < MAX_BRANDS; i++) {
  131                 if (elf_brand_list[i] == entry) {
  132                         elf_brand_list[i] = NULL;
  133                         break;
  134                 }
  135         }
  136         if (i == MAX_BRANDS)
  137                 return (-1);
  138         return (0);
  139 }
  140 
  141 int
  142 __elfN(brand_inuse)(Elf_Brandinfo *entry)
  143 {
  144         struct proc *p;
  145         int rval = FALSE;
  146 
  147         sx_slock(&allproc_lock);
  148         LIST_FOREACH(p, &allproc, p_list) {
  149                 if (p->p_sysent == entry->sysvec) {
  150                         rval = TRUE;
  151                         break;
  152                 }
  153         }
  154         sx_sunlock(&allproc_lock);
  155 
  156         return (rval);
  157 }
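      /*
       * Example (editor's illustrative sketch, not part of this file): an
       * ABI emulation module uses the three functions above to register
       * itself.  A Linux-style i386 brand might look like:
       *
       *      static Elf32_Brandinfo my_brand = {
       *              ELFOSABI_LINUX,         -- brand
       *              EM_386,                 -- machine
       *              "Linux",                -- compat_3_brand
       *              "/compat/linux",        -- emul_path (hypothetical)
       *              "/lib/ld-linux.so.2",   -- interp_path (hypothetical)
       *              &my_linux_sysvec,       -- sysvec, assumed elsewhere
       *              NULL                    -- interp_newpath
       *      };
       *
       *      elf32_insert_brand_entry(&my_brand);    at MOD_LOAD
       *      ...
       *      if (elf32_brand_inuse(&my_brand))       at MOD_UNLOAD
       *              return (EBUSY);
       *      elf32_remove_brand_entry(&my_brand);
       */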
  158 
  159 static Elf_Brandinfo *
  160 __elfN(get_brandinfo)(const Elf_Ehdr *hdr, const char *interp)
  161 {
  162         Elf_Brandinfo *bi;
  163         int i;
  164 
  165         /*
  166          * We support three types of branding -- (1) the ELF EI_OSABI field
  167          * that SCO added to the ELF spec, (2) FreeBSD 3.x's traditional string
   168  * branding within the ELF header, and (3) the path given in the
   169  * `interp_path' field.  We should also look for the ".note.ABI-tag"
   170  * ELF section, found in all Linux, FreeBSD 4.1+, and some NetBSD binaries.
  171          */
  172 
  173         /* If the executable has a brand, search for it in the brand list. */
  174         for (i = 0; i < MAX_BRANDS; i++) {
  175                 bi = elf_brand_list[i];
  176                 if (bi != NULL && hdr->e_machine == bi->machine &&
  177                     (hdr->e_ident[EI_OSABI] == bi->brand ||
  178                     strncmp((const char *)&hdr->e_ident[OLD_EI_BRAND],
  179                     bi->compat_3_brand, strlen(bi->compat_3_brand)) == 0))
  180                         return (bi);
  181         }
  182 
  183         /* Lacking a known brand, search for a recognized interpreter. */
  184         if (interp != NULL) {
  185                 for (i = 0; i < MAX_BRANDS; i++) {
  186                         bi = elf_brand_list[i];
  187                         if (bi != NULL && hdr->e_machine == bi->machine &&
  188                             strcmp(interp, bi->interp_path) == 0)
  189                                 return (bi);
  190                 }
  191         }
  192 
  193         /* Lacking a recognized interpreter, try the default brand */
  194         for (i = 0; i < MAX_BRANDS; i++) {
  195                 bi = elf_brand_list[i];
  196                 if (bi != NULL && hdr->e_machine == bi->machine &&
  197                     __elfN(fallback_brand) == bi->brand)
  198                         return (bi);
  199         }
  200         return (NULL);
  201 }
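      /*
       * For reference (editor's note): methods (1) and (2) both live in
       * the ELF identification bytes at the very start of the file:
       *
       *      e_ident[0..3]   0x7f 'E' 'L' 'F'   magic
       *      e_ident[4..6]   class, data, version
       *      e_ident[7]      EI_OSABI           method (1)
       *      e_ident[8..15]  "FreeBSD\0"        OLD_EI_BRAND, method (2)
       *
       * while method (3) compares the PT_INTERP path passed in by the
       * caller against each registered brand's interp_path.
       */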
  202 
  203 static int
  204 __elfN(check_header)(const Elf_Ehdr *hdr)
  205 {
  206         Elf_Brandinfo *bi;
  207         int i;
  208 
  209         if (!IS_ELF(*hdr) ||
  210             hdr->e_ident[EI_CLASS] != ELF_TARG_CLASS ||
  211             hdr->e_ident[EI_DATA] != ELF_TARG_DATA ||
  212             hdr->e_ident[EI_VERSION] != EV_CURRENT ||
  213             hdr->e_phentsize != sizeof(Elf_Phdr) ||
  214             hdr->e_version != ELF_TARG_VER)
  215                 return (ENOEXEC);
  216 
  217         /*
  218          * Make sure we have at least one brand for this machine.
  219          */
  220 
  221         for (i = 0; i < MAX_BRANDS; i++) {
  222                 bi = elf_brand_list[i];
  223                 if (bi != NULL && bi->machine == hdr->e_machine)
  224                         break;
  225         }
  226         if (i == MAX_BRANDS)
  227                 return (ENOEXEC);
  228 
  229         return (0);
  230 }
  231 
  232 static int
  233 __elfN(map_partial)(vm_map_t map, vm_object_t object, vm_ooffset_t offset,
  234     vm_offset_t start, vm_offset_t end, vm_prot_t prot)
  235 {
  236         struct sf_buf *sf;
  237         int error;
  238         vm_offset_t off;
  239 
  240         /*
  241          * Create the page if it doesn't exist yet. Ignore errors.
  242          */
  243         vm_map_lock(map);
  244         vm_map_insert(map, NULL, 0, trunc_page(start), round_page(end),
  245             VM_PROT_ALL, VM_PROT_ALL, 0);
  246         vm_map_unlock(map);
  247 
  248         /*
  249          * Find the page from the underlying object.
  250          */
  251         if (object) {
  252                 sf = vm_imgact_map_page(object, offset);
  253                 if (sf == NULL)
  254                         return (KERN_FAILURE);
  255                 off = offset - trunc_page(offset);
  256                 error = copyout((caddr_t)sf_buf_kva(sf) + off, (caddr_t)start,
  257                     end - start);
  258                 vm_imgact_unmap_page(sf);
  259                 if (error) {
  260                         return (KERN_FAILURE);
  261                 }
  262         }
  263 
  264         return (KERN_SUCCESS);
  265 }
  266 
  267 static int
  268 __elfN(map_insert)(vm_map_t map, vm_object_t object, vm_ooffset_t offset,
  269     vm_offset_t start, vm_offset_t end, vm_prot_t prot, int cow)
  270 {
  271         struct sf_buf *sf;
  272         vm_offset_t off;
  273         vm_size_t sz;
  274         int error, rv;
  275 
  276         if (start != trunc_page(start)) {
  277                 rv = __elfN(map_partial)(map, object, offset, start,
  278                     round_page(start), prot);
  279                 if (rv)
  280                         return (rv);
  281                 offset += round_page(start) - start;
  282                 start = round_page(start);
  283         }
  284         if (end != round_page(end)) {
  285                 rv = __elfN(map_partial)(map, object, offset +
  286                     trunc_page(end) - start, trunc_page(end), end, prot);
  287                 if (rv)
  288                         return (rv);
  289                 end = trunc_page(end);
  290         }
  291         if (end > start) {
  292                 if (offset & PAGE_MASK) {
  293                         /*
  294                          * The mapping is not page aligned. This means we have
  295                          * to copy the data. Sigh.
  296                          */
  297                         rv = vm_map_find(map, NULL, 0, &start, end - start,
  298                             FALSE, prot | VM_PROT_WRITE, VM_PROT_ALL, 0);
  299                         if (rv)
  300                                 return (rv);
  301                         if (object == NULL)
  302                                 return (KERN_SUCCESS);
  303                         for (; start < end; start += sz) {
  304                                 sf = vm_imgact_map_page(object, offset);
  305                                 if (sf == NULL)
  306                                         return (KERN_FAILURE);
  307                                 off = offset - trunc_page(offset);
  308                                 sz = end - start;
  309                                 if (sz > PAGE_SIZE - off)
  310                                         sz = PAGE_SIZE - off;
  311                                 error = copyout((caddr_t)sf_buf_kva(sf) + off,
  312                                     (caddr_t)start, sz);
  313                                 vm_imgact_unmap_page(sf);
  314                                 if (error) {
  315                                         return (KERN_FAILURE);
  316                                 }
  317                                 offset += sz;
  318                         }
  319                         rv = KERN_SUCCESS;
  320                 } else {
  321                         vm_object_reference(object);
  322                         vm_map_lock(map);
  323                         rv = vm_map_insert(map, object, offset, start, end,
  324                             prot, VM_PROT_ALL, cow);
  325                         vm_map_unlock(map);
  326                         if (rv != KERN_SUCCESS)
  327                                 vm_object_deallocate(object);
  328                 }
  329                 return (rv);
  330         } else {
  331                 return (KERN_SUCCESS);
  332         }
  333 }
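      /*
       * Worked example (editor's note, hypothetical numbers, 4K pages):
       * mapping start = 0x8049f24, end = 0x804c100.  The unaligned head
       * [0x8049f24, 0x804a000) and tail [0x804c000, 0x804c100) each go
       * through __elfN(map_partial)(), which copies the bytes out by hand;
       * the aligned middle [0x804a000, 0x804c000) is entered with
       * vm_map_insert() so it can be demand paged straight from the file's
       * VM object -- unless the file offset itself is unaligned, in which
       * case the copyout loop above does the work instead.
       */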
  334 
  335 static int
  336 __elfN(load_section)(struct vmspace *vmspace,
  337         vm_object_t object, vm_offset_t offset,
  338         caddr_t vmaddr, size_t memsz, size_t filsz, vm_prot_t prot,
  339         size_t pagesize)
  340 {
  341         struct sf_buf *sf;
  342         size_t map_len;
  343         vm_offset_t map_addr;
  344         int error, rv, cow;
  345         size_t copy_len;
  346         vm_offset_t file_addr;
  347 
  348         /*
  349          * It's necessary to fail if the filsz + offset taken from the
  350          * header is greater than the actual file pager object's size.
  351          * If we were to allow this, then the vm_map_find() below would
  352          * walk right off the end of the file object and into the ether.
  353          *
  354          * While I'm here, might as well check for something else that
  355          * is invalid: filsz cannot be greater than memsz.
  356          */
  357         if ((off_t)filsz + offset > object->un_pager.vnp.vnp_size ||
  358             filsz > memsz) {
  359                 uprintf("elf_load_section: truncated ELF file\n");
  360                 return (ENOEXEC);
  361         }
  362 
   363 #define trunc_page_ps(va, ps)   ((va) & ~((ps) - 1))
   364 #define round_page_ps(va, ps)   (((va) + ((ps) - 1)) & ~((ps) - 1))
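      /*
       * E.g. (editor's note) with pagesize = 0x2000 (8K):
       *      trunc_page_ps(0x4a37, 0x2000) == 0x4000
       *      round_page_ps(0x4a37, 0x2000) == 0x6000
       * The "_ps" variants exist because a sysentvec may use a mapping
       * granularity (sv_pagesize) larger than the hardware PAGE_SIZE.
       */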
  365 
  366         map_addr = trunc_page_ps((vm_offset_t)vmaddr, pagesize);
  367         file_addr = trunc_page_ps(offset, pagesize);
  368 
  369         /*
  370          * We have two choices.  We can either clear the data in the last page
  371          * of an oversized mapping, or we can start the anon mapping a page
  372          * early and copy the initialized data into that first page.  We
  373          * choose the second..
   374  * choose the second.
  375         if (memsz > filsz)
  376                 map_len = trunc_page_ps(offset + filsz, pagesize) - file_addr;
  377         else
  378                 map_len = round_page_ps(offset + filsz, pagesize) - file_addr;
  379 
  380         if (map_len != 0) {
  381                 /* cow flags: don't dump readonly sections in core */
  382                 cow = MAP_COPY_ON_WRITE | MAP_PREFAULT |
  383                     (prot & VM_PROT_WRITE ? 0 : MAP_DISABLE_COREDUMP);
  384 
  385                 rv = __elfN(map_insert)(&vmspace->vm_map,
  386                                       object,
  387                                       file_addr,        /* file offset */
  388                                       map_addr,         /* virtual start */
  389                                       map_addr + map_len,/* virtual end */
  390                                       prot,
  391                                       cow);
  392                 if (rv != KERN_SUCCESS)
  393                         return (EINVAL);
  394 
  395                 /* we can stop now if we've covered it all */
  396                 if (memsz == filsz) {
  397                         return (0);
  398                 }
  399         }
  400 
  401 
  402         /*
  403          * We have to get the remaining bit of the file into the first part
  404          * of the oversized map segment.  This is normally because the .data
  405          * segment in the file is extended to provide bss.  It's a neat idea
  406          * to try and save a page, but it's a pain in the behind to implement.
  407          */
  408         copy_len = (offset + filsz) - trunc_page_ps(offset + filsz, pagesize);
  409         map_addr = trunc_page_ps((vm_offset_t)vmaddr + filsz, pagesize);
  410         map_len = round_page_ps((vm_offset_t)vmaddr + memsz, pagesize) -
  411             map_addr;
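              /*
               * Worked example (editor's note, hypothetical numbers,
               * pagesize == PAGE_SIZE == 0x1000): with offset = 0x1000,
               * filsz = 0x1234, memsz = 0x3000 and vmaddr = 0x8049000:
               *
               *      copy_len = 0x2234 - trunc(0x2234)    == 0x234
               *      map_addr = trunc(0x8049000 + 0x1234) == 0x804a000
               *      map_len  = round(0x8049000 + 0x3000) - 0x804a000
               *                                           == 0x2000
               *
               * i.e. two anonymous zero-fill pages get mapped below, and
               * the trailing 0x234 initialized bytes are copied into the
               * first of them.
               */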
  412 
  413         /* This had damn well better be true! */
  414         if (map_len != 0) {
  415                 rv = __elfN(map_insert)(&vmspace->vm_map, NULL, 0, map_addr,
  416                     map_addr + map_len, VM_PROT_ALL, 0);
  417                 if (rv != KERN_SUCCESS) {
  418                         return (EINVAL);
  419                 }
  420         }
  421 
  422         if (copy_len != 0) {
  423                 vm_offset_t off;
  424 
  425                 sf = vm_imgact_map_page(object, offset + filsz);
  426                 if (sf == NULL)
  427                         return (EIO);
  428 
  429                 /* send the page fragment to user space */
  430                 off = trunc_page_ps(offset + filsz, pagesize) -
  431                     trunc_page(offset + filsz);
  432                 error = copyout((caddr_t)sf_buf_kva(sf) + off,
  433                     (caddr_t)map_addr, copy_len);
  434                 vm_imgact_unmap_page(sf);
  435                 if (error) {
  436                         return (error);
  437                 }
  438         }
  439 
  440         /*
  441          * set it to the specified protection.
  442          * XXX had better undo the damage from pasting over the cracks here!
  443          */
  444         vm_map_protect(&vmspace->vm_map, trunc_page(map_addr),
  445             round_page(map_addr + map_len),  prot, FALSE);
  446 
  447         return (0);
  448 }
  449 
  450 /*
  451  * Load the file "file" into memory.  It may be either a shared object
  452  * or an executable.
  453  *
  454  * The "addr" reference parameter is in/out.  On entry, it specifies
  455  * the address where a shared object should be loaded.  If the file is
  456  * an executable, this value is ignored.  On exit, "addr" specifies
  457  * where the file was actually loaded.
  458  *
  459  * The "entry" reference parameter is out only.  On exit, it specifies
  460  * the entry point for the loaded file.
  461  */
  462 static int
  463 __elfN(load_file)(struct proc *p, const char *file, u_long *addr,
  464         u_long *entry, size_t pagesize)
  465 {
  466         struct {
  467                 struct nameidata nd;
  468                 struct vattr attr;
  469                 struct image_params image_params;
  470         } *tempdata;
  471         const Elf_Ehdr *hdr = NULL;
  472         const Elf_Phdr *phdr = NULL;
  473         struct nameidata *nd;
  474         struct vmspace *vmspace = p->p_vmspace;
  475         struct vattr *attr;
  476         struct image_params *imgp;
  477         vm_prot_t prot;
  478         u_long rbase;
  479         u_long base_addr = 0;
  480         int vfslocked, error, i, numsegs;
  481 
  482         if (curthread->td_proc != p)
  483                 panic("elf_load_file - thread");        /* XXXKSE DIAGNOSTIC */
  484 
  485         tempdata = malloc(sizeof(*tempdata), M_TEMP, M_WAITOK);
  486         nd = &tempdata->nd;
  487         attr = &tempdata->attr;
  488         imgp = &tempdata->image_params;
  489 
  490         /*
  491          * Initialize part of the common data
  492          */
  493         imgp->proc = p;
  494         imgp->attr = attr;
  495         imgp->firstpage = NULL;
  496         imgp->image_header = NULL;
  497         imgp->object = NULL;
  498         imgp->execlabel = NULL;
  499 
  500         /* XXXKSE */
  501         NDINIT(nd, LOOKUP, MPSAFE|LOCKLEAF|FOLLOW, UIO_SYSSPACE, file,
  502             curthread);
  503         vfslocked = 0;
  504         if ((error = namei(nd)) != 0) {
  505                 nd->ni_vp = NULL;
  506                 goto fail;
  507         }
  508         vfslocked = NDHASGIANT(nd);
  509         NDFREE(nd, NDF_ONLY_PNBUF);
  510         imgp->vp = nd->ni_vp;
  511 
  512         /*
  513          * Check permissions, modes, uid, etc on the file, and "open" it.
  514          */
  515         error = exec_check_permissions(imgp);
  516         if (error)
  517                 goto fail;
  518 
  519         error = exec_map_first_page(imgp);
  520         if (error)
  521                 goto fail;
  522 
  523         /*
  524          * Also make certain that the interpreter stays the same, so set
  525          * its VV_TEXT flag, too.
  526          */
  527         nd->ni_vp->v_vflag |= VV_TEXT;
  528 
  529         imgp->object = nd->ni_vp->v_object;
  530 
  531         hdr = (const Elf_Ehdr *)imgp->image_header;
  532         if ((error = __elfN(check_header)(hdr)) != 0)
  533                 goto fail;
  534         if (hdr->e_type == ET_DYN)
  535                 rbase = *addr;
  536         else if (hdr->e_type == ET_EXEC)
  537                 rbase = 0;
  538         else {
  539                 error = ENOEXEC;
  540                 goto fail;
  541         }
  542 
  543         /* Only support headers that fit within first page for now      */
  544         /*    (multiplication of two Elf_Half fields will not overflow) */
  545         if ((hdr->e_phoff > PAGE_SIZE) ||
  546             (hdr->e_phentsize * hdr->e_phnum) > PAGE_SIZE - hdr->e_phoff) {
  547                 error = ENOEXEC;
  548                 goto fail;
  549         }
  550 
  551         phdr = (const Elf_Phdr *)(imgp->image_header + hdr->e_phoff);
  552 
  553         for (i = 0, numsegs = 0; i < hdr->e_phnum; i++) {
  554                 if (phdr[i].p_type == PT_LOAD) {        /* Loadable segment */
  555                         prot = 0;
  556                         if (phdr[i].p_flags & PF_X)
  557                                 prot |= VM_PROT_EXECUTE;
  558                         if (phdr[i].p_flags & PF_W)
  559                                 prot |= VM_PROT_WRITE;
  560                         if (phdr[i].p_flags & PF_R)
  561                                 prot |= VM_PROT_READ;
  562 
  563                         if ((error = __elfN(load_section)(vmspace,
  564                             imgp->object, phdr[i].p_offset,
  565                             (caddr_t)(uintptr_t)phdr[i].p_vaddr + rbase,
  566                             phdr[i].p_memsz, phdr[i].p_filesz, prot,
  567                             pagesize)) != 0)
  568                                 goto fail;
  569                         /*
  570                          * Establish the base address if this is the
  571                          * first segment.
  572                          */
  573                         if (numsegs == 0)
  574                                 base_addr = trunc_page(phdr[i].p_vaddr +
  575                                     rbase);
  576                         numsegs++;
  577                 }
  578         }
  579         *addr = base_addr;
  580         *entry = (unsigned long)hdr->e_entry + rbase;
  581 
  582 fail:
  583         if (imgp->firstpage)
  584                 exec_unmap_first_page(imgp);
  585 
  586         if (nd->ni_vp)
  587                 vput(nd->ni_vp);
  588 
  589         VFS_UNLOCK_GIANT(vfslocked);
  590         free(tempdata, M_TEMP);
  591 
  592         return (error);
  593 }
  594 
  595 static int
  596 __CONCAT(exec_, __elfN(imgact))(struct image_params *imgp)
  597 {
  598         const Elf_Ehdr *hdr = (const Elf_Ehdr *)imgp->image_header;
  599         const Elf_Phdr *phdr;
  600         Elf_Auxargs *elf_auxargs;
  601         struct vmspace *vmspace;
  602         vm_prot_t prot;
  603         u_long text_size = 0, data_size = 0, total_size = 0;
  604         u_long text_addr = 0, data_addr = 0;
  605         u_long seg_size, seg_addr;
  606         u_long addr, entry = 0, proghdr = 0;
  607         int error = 0, i;
  608         const char *interp = NULL;
  609         Elf_Brandinfo *brand_info;
  610         char *path;
  611         struct thread *td = curthread;
  612         struct sysentvec *sv;
  613 
  614         /*
  615          * Do we have a valid ELF header ?
  616          *
  617          * Only allow ET_EXEC & ET_DYN here, reject ET_DYN later
   618          * if the particular brand doesn't support it.
  619          */
  620         if (__elfN(check_header)(hdr) != 0 ||
  621             (hdr->e_type != ET_EXEC && hdr->e_type != ET_DYN))
  622                 return (-1);
  623 
  624         /*
  625          * From here on down, we return an errno, not -1, as we've
  626          * detected an ELF file.
  627          */
  628 
  629         if ((hdr->e_phoff > PAGE_SIZE) ||
  630             (hdr->e_phoff + hdr->e_phentsize * hdr->e_phnum) > PAGE_SIZE) {
  631                 /* Only support headers in first page for now */
  632                 return (ENOEXEC);
  633         }
  634         phdr = (const Elf_Phdr *)(imgp->image_header + hdr->e_phoff);
  635         for (i = 0; i < hdr->e_phnum; i++) {
  636                 if (phdr[i].p_type == PT_INTERP) {
  637                         /* Path to interpreter */
  638                         if (phdr[i].p_filesz > MAXPATHLEN ||
  639                             phdr[i].p_offset + phdr[i].p_filesz > PAGE_SIZE)
  640                                 return (ENOEXEC);
  641                         interp = imgp->image_header + phdr[i].p_offset;
  642                         break;
  643                 }
  644         }
  645 
  646         brand_info = __elfN(get_brandinfo)(hdr, interp);
  647         if (brand_info == NULL) {
  648                 uprintf("ELF binary type \"%u\" not known.\n",
  649                     hdr->e_ident[EI_OSABI]);
  650                 return (ENOEXEC);
  651         }
  652         if (hdr->e_type == ET_DYN && brand_info->brand != ELFOSABI_LINUX)
  653                 return (ENOEXEC);
  654         sv = brand_info->sysvec;
  655         if (interp != NULL && brand_info->interp_newpath != NULL)
  656                 interp = brand_info->interp_newpath;
  657 
  658         /*
  659          * Avoid a possible deadlock if the current address space is destroyed
  660          * and that address space maps the locked vnode.  In the common case,
  661          * the locked vnode's v_usecount is decremented but remains greater
  662          * than zero.  Consequently, the vnode lock is not needed by vrele().
  663          * However, in cases where the vnode lock is external, such as nullfs,
  664          * v_usecount may become zero.
  665          */
  666         VOP_UNLOCK(imgp->vp, 0, td);
  667 
  668         exec_new_vmspace(imgp, sv);
  669 
  670         vn_lock(imgp->vp, LK_EXCLUSIVE | LK_RETRY, td);
  671 
  672         vmspace = imgp->proc->p_vmspace;
  673 
  674         for (i = 0; i < hdr->e_phnum; i++) {
  675                 switch (phdr[i].p_type) {
  676                 case PT_LOAD:   /* Loadable segment */
  677                         prot = 0;
  678                         if (phdr[i].p_flags & PF_X)
  679                                 prot |= VM_PROT_EXECUTE;
  680                         if (phdr[i].p_flags & PF_W)
  681                                 prot |= VM_PROT_WRITE;
  682                         if (phdr[i].p_flags & PF_R)
  683                                 prot |= VM_PROT_READ;
  684 
  685 #if defined(__ia64__) && __ELF_WORD_SIZE == 32 && defined(IA32_ME_HARDER)
  686                         /*
  687                          * Some x86 binaries assume read == executable,
   688                          * notably the M3 runtime and therefore cvsup.
  689                          */
  690                         if (prot & VM_PROT_READ)
  691                                 prot |= VM_PROT_EXECUTE;
  692 #endif
  693 
  694                         if ((error = __elfN(load_section)(vmspace,
  695                             imgp->object, phdr[i].p_offset,
  696                             (caddr_t)(uintptr_t)phdr[i].p_vaddr,
  697                             phdr[i].p_memsz, phdr[i].p_filesz, prot,
  698                             sv->sv_pagesize)) != 0)
  699                                 return (error);
  700 
  701                         /*
  702                          * If this segment contains the program headers,
  703                          * remember their virtual address for the AT_PHDR
  704                          * aux entry. Static binaries don't usually include
  705                          * a PT_PHDR entry.
  706                          */
  707                         if (phdr[i].p_offset == 0 &&
  708                             hdr->e_phoff + hdr->e_phnum * hdr->e_phentsize
  709                                 <= phdr[i].p_filesz)
  710                                 proghdr = phdr[i].p_vaddr + hdr->e_phoff;
  711 
  712                         seg_addr = trunc_page(phdr[i].p_vaddr);
  713                         seg_size = round_page(phdr[i].p_memsz +
  714                             phdr[i].p_vaddr - seg_addr);
  715 
  716                         /*
  717                          * Is this .text or .data?  We can't use
  718                          * VM_PROT_WRITE or VM_PROT_EXEC, it breaks the
  719                          * alpha terribly and possibly does other bad
  720                          * things so we stick to the old way of figuring
  721                          * it out:  If the segment contains the program
  722                          * entry point, it's a text segment, otherwise it
  723                          * is a data segment.
  724                          *
  725                          * Note that obreak() assumes that data_addr + 
  726                          * data_size == end of data load area, and the ELF
  727                          * file format expects segments to be sorted by
  728                          * address.  If multiple data segments exist, the
  729                          * last one will be used.
  730                          */
  731                         if (hdr->e_entry >= phdr[i].p_vaddr &&
  732                             hdr->e_entry < (phdr[i].p_vaddr +
  733                             phdr[i].p_memsz)) {
  734                                 text_size = seg_size;
  735                                 text_addr = seg_addr;
  736                                 entry = (u_long)hdr->e_entry;
  737                         } else {
  738                                 data_size = seg_size;
  739                                 data_addr = seg_addr;
  740                         }
  741                         total_size += seg_size;
  742                         break;
  743                 case PT_PHDR:   /* Program header table info */
  744                         proghdr = phdr[i].p_vaddr;
  745                         break;
  746                 default:
  747                         break;
  748                 }
  749         }
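              /*
               * Worked example (editor's note, hypothetical numbers): a
               * typical two-segment i386 executable might carry
               *
               *      PT_LOAD vaddr 0x8048000 filesz 0x5000 memsz 0x5000 R-X
               *      PT_LOAD vaddr 0x804e000 filesz 0x0400 memsz 0x1200 RW-
               *
               * with e_entry = 0x80480c0.  The entry point falls inside the
               * first segment, so that one supplies text_addr/text_size;
               * the second becomes the data segment whose end obreak()
               * will later grow.
               */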
  750         
  751         if (data_addr == 0 && data_size == 0) {
  752                 data_addr = text_addr;
  753                 data_size = text_size;
  754         }
  755 
  756         /*
  757          * Check limits.  It should be safe to check the
  758          * limits after loading the segments since we do
   759          * not actually fault in all of the segments' pages.
  760          */
  761         PROC_LOCK(imgp->proc);
  762         if (data_size > lim_cur(imgp->proc, RLIMIT_DATA) ||
  763             text_size > maxtsiz ||
  764             total_size > lim_cur(imgp->proc, RLIMIT_VMEM)) {
  765                 PROC_UNLOCK(imgp->proc);
  766                 return (ENOMEM);
  767         }
  768 
  769         vmspace->vm_tsize = text_size >> PAGE_SHIFT;
  770         vmspace->vm_taddr = (caddr_t)(uintptr_t)text_addr;
  771         vmspace->vm_dsize = data_size >> PAGE_SHIFT;
  772         vmspace->vm_daddr = (caddr_t)(uintptr_t)data_addr;
  773 
  774         /*
  775          * We load the dynamic linker where a userland call
  776          * to mmap(0, ...) would put it.  The rationale behind this
  777          * calculation is that it leaves room for the heap to grow to
  778          * its maximum allowed size.
  779          */
  780         addr = round_page((vm_offset_t)imgp->proc->p_vmspace->vm_daddr +
  781             lim_max(imgp->proc, RLIMIT_DATA));
  782         PROC_UNLOCK(imgp->proc);
  783 
  784         imgp->entry_addr = entry;
  785 
  786         imgp->proc->p_sysent = sv;
  787         if (interp != NULL) {
  788                 VOP_UNLOCK(imgp->vp, 0, td);
  789                 if (brand_info->emul_path != NULL &&
  790                     brand_info->emul_path[0] != '\0') {
  791                         path = malloc(MAXPATHLEN, M_TEMP, M_WAITOK);
  792                         snprintf(path, MAXPATHLEN, "%s%s",
  793                             brand_info->emul_path, interp);
  794                         error = __elfN(load_file)(imgp->proc, path, &addr,
  795                             &imgp->entry_addr, sv->sv_pagesize);
  796                         free(path, M_TEMP);
  797                         if (error == 0)
  798                                 interp = NULL;
  799                 }
  800                 if (interp != NULL) {
  801                         error = __elfN(load_file)(imgp->proc, interp, &addr,
  802                             &imgp->entry_addr, sv->sv_pagesize);
  803                 }
  804                 vn_lock(imgp->vp, LK_EXCLUSIVE | LK_RETRY, td);
  805                 if (error != 0) {
  806                         uprintf("ELF interpreter %s not found\n", interp);
  807                         return (error);
  808                 }
  809         }
  810 
  811         /*
  812          * Construct auxargs table (used by the fixup routine)
  813          */
  814         elf_auxargs = malloc(sizeof(Elf_Auxargs), M_TEMP, M_WAITOK);
  815         elf_auxargs->execfd = -1;
  816         elf_auxargs->phdr = proghdr;
  817         elf_auxargs->phent = hdr->e_phentsize;
  818         elf_auxargs->phnum = hdr->e_phnum;
  819         elf_auxargs->pagesz = PAGE_SIZE;
  820         elf_auxargs->base = addr;
  821         elf_auxargs->flags = 0;
  822         elf_auxargs->entry = entry;
  823         elf_auxargs->trace = elf_trace;
  824 
  825         imgp->auxargs = elf_auxargs;
  826         imgp->interpreted = 0;
  827 
  828         return (error);
  829 }
  830 
  831 #define suword __CONCAT(suword, __ELF_WORD_SIZE)
  832 
  833 int
  834 __elfN(freebsd_fixup)(register_t **stack_base, struct image_params *imgp)
  835 {
  836         Elf_Auxargs *args = (Elf_Auxargs *)imgp->auxargs;
  837         Elf_Addr *base;
  838         Elf_Addr *pos;
  839 
  840         base = (Elf_Addr *)*stack_base;
  841         pos = base + (imgp->args->argc + imgp->args->envc + 2);
  842 
  843         if (args->trace) {
  844                 AUXARGS_ENTRY(pos, AT_DEBUG, 1);
  845         }
  846         if (args->execfd != -1) {
  847                 AUXARGS_ENTRY(pos, AT_EXECFD, args->execfd);
  848         }
  849         AUXARGS_ENTRY(pos, AT_PHDR, args->phdr);
  850         AUXARGS_ENTRY(pos, AT_PHENT, args->phent);
  851         AUXARGS_ENTRY(pos, AT_PHNUM, args->phnum);
  852         AUXARGS_ENTRY(pos, AT_PAGESZ, args->pagesz);
  853         AUXARGS_ENTRY(pos, AT_FLAGS, args->flags);
  854         AUXARGS_ENTRY(pos, AT_ENTRY, args->entry);
  855         AUXARGS_ENTRY(pos, AT_BASE, args->base);
  856         AUXARGS_ENTRY(pos, AT_NULL, 0);
  857 
  858         free(imgp->auxargs, M_TEMP);
  859         imgp->auxargs = NULL;
  860 
  861         base--;
  862         suword(base, (long)imgp->args->argc);
  863         *stack_base = (register_t *)base;
  864         return (0);
  865 }
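      /*
       * Resulting user stack layout (editor's sketch), lowest address
       * first, after the suword() above:
       *
       *      *stack_base -> argc
       *                     argv[0] ... argv[argc-1], NULL
       *                     envv[0] ... envv[envc-1], NULL
       *                     AT_* auxv entries, AT_NULL    <- "pos" above
       *
       * exec_copyout_strings() laid down the pointer vectors; this fixup
       * appends the ELF auxiliary vector right after the environment's
       * terminating NULL and then slips argc in underneath the vectors.
       */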
  866 
  867 /*
  868  * Code for generating ELF core dumps.
  869  */
  870 
  871 typedef void (*segment_callback)(vm_map_entry_t, void *);
  872 
  873 /* Closure for cb_put_phdr(). */
  874 struct phdr_closure {
  875         Elf_Phdr *phdr;         /* Program header to fill in */
  876         Elf_Off offset;         /* Offset of segment in core file */
  877 };
  878 
  879 /* Closure for cb_size_segment(). */
  880 struct sseg_closure {
  881         int count;              /* Count of writable segments. */
  882         size_t size;            /* Total size of all writable segments. */
  883 };
  884 
  885 static void cb_put_phdr(vm_map_entry_t, void *);
  886 static void cb_size_segment(vm_map_entry_t, void *);
  887 static void each_writable_segment(struct thread *, segment_callback, void *);
  888 static int __elfN(corehdr)(struct thread *, struct vnode *, struct ucred *,
  889     int, void *, size_t);
  890 static void __elfN(puthdr)(struct thread *, void *, size_t *, int);
  891 static void __elfN(putnote)(void *, size_t *, const char *, int,
  892     const void *, size_t);
  893 
  894 extern int osreldate;
  895 
  896 int
  897 __elfN(coredump)(td, vp, limit)
  898         struct thread *td;
  899         struct vnode *vp;
  900         off_t limit;
  901 {
  902         struct ucred *cred = td->td_ucred;
  903         int error = 0;
  904         struct sseg_closure seginfo;
  905         void *hdr;
  906         size_t hdrsize;
  907 
  908         /* Size the program segments. */
  909         seginfo.count = 0;
  910         seginfo.size = 0;
  911         each_writable_segment(td, cb_size_segment, &seginfo);
  912 
  913         /*
  914          * Calculate the size of the core file header area by making
  915          * a dry run of generating it.  Nothing is written, but the
  916          * size is calculated.
  917          */
  918         hdrsize = 0;
  919         __elfN(puthdr)(td, (void *)NULL, &hdrsize, seginfo.count);
  920 
  921         if (hdrsize + seginfo.size >= limit)
  922                 return (EFAULT);
  923 
  924         /*
  925          * Allocate memory for building the header, fill it up,
  926          * and write it out.
  927          */
  928         hdr = malloc(hdrsize, M_TEMP, M_WAITOK);
  929         if (hdr == NULL) {
  930                 return (EINVAL);
  931         }
  932         error = __elfN(corehdr)(td, vp, cred, seginfo.count, hdr, hdrsize);
  933 
  934         /* Write the contents of all of the writable segments. */
  935         if (error == 0) {
  936                 Elf_Phdr *php;
  937                 off_t offset;
  938                 int i;
  939 
  940                 php = (Elf_Phdr *)((char *)hdr + sizeof(Elf_Ehdr)) + 1;
  941                 offset = hdrsize;
  942                 for (i = 0; i < seginfo.count; i++) {
  943                         error = vn_rdwr_inchunks(UIO_WRITE, vp,
  944                             (caddr_t)(uintptr_t)php->p_vaddr,
  945                             php->p_filesz, offset, UIO_USERSPACE,
  946                             IO_UNIT | IO_DIRECT, cred, NOCRED, NULL,
  947                             curthread); /* XXXKSE */
  948                         if (error != 0)
  949                                 break;
  950                         offset += php->p_filesz;
  951                         php++;
  952                 }
  953         }
  954         free(hdr, M_TEMP);
  955 
  956         return (error);
  957 }
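      /*
       * Resulting core file layout (editor's sketch):
       *
       *      Elf_Ehdr
       *      Elf_Phdr[0]             PT_NOTE, pointing at the notes below
       *      Elf_Phdr[1..numsegs]    PT_LOAD, one per writable segment
       *      notes                   NT_PRPSINFO, then one NT_PRSTATUS +
       *                              NT_FPREGSET pair per thread
       *      (padding up to a page boundary)
       *      segment images, at the p_offset values precomputed by
       *      cb_put_phdr()
       */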
  958 
  959 /*
  960  * A callback for each_writable_segment() to write out the segment's
  961  * program header entry.
  962  */
  963 static void
  964 cb_put_phdr(entry, closure)
  965         vm_map_entry_t entry;
  966         void *closure;
  967 {
  968         struct phdr_closure *phc = (struct phdr_closure *)closure;
  969         Elf_Phdr *phdr = phc->phdr;
  970 
  971         phc->offset = round_page(phc->offset);
  972 
  973         phdr->p_type = PT_LOAD;
  974         phdr->p_offset = phc->offset;
  975         phdr->p_vaddr = entry->start;
  976         phdr->p_paddr = 0;
  977         phdr->p_filesz = phdr->p_memsz = entry->end - entry->start;
  978         phdr->p_align = PAGE_SIZE;
  979         phdr->p_flags = 0;
  980         if (entry->protection & VM_PROT_READ)
  981                 phdr->p_flags |= PF_R;
  982         if (entry->protection & VM_PROT_WRITE)
  983                 phdr->p_flags |= PF_W;
  984         if (entry->protection & VM_PROT_EXECUTE)
  985                 phdr->p_flags |= PF_X;
  986 
  987         phc->offset += phdr->p_filesz;
  988         phc->phdr++;
  989 }
  990 
  991 /*
  992  * A callback for each_writable_segment() to gather information about
  993  * the number of segments and their total size.
  994  */
  995 static void
  996 cb_size_segment(entry, closure)
  997         vm_map_entry_t entry;
  998         void *closure;
  999 {
 1000         struct sseg_closure *ssc = (struct sseg_closure *)closure;
 1001 
 1002         ssc->count++;
 1003         ssc->size += entry->end - entry->start;
 1004 }
 1005 
 1006 /*
 1007  * For each writable segment in the process's memory map, call the given
 1008  * function with a pointer to the map entry and some arbitrary
 1009  * caller-supplied data.
 1010  */
 1011 static void
 1012 each_writable_segment(td, func, closure)
 1013         struct thread *td;
 1014         segment_callback func;
 1015         void *closure;
 1016 {
 1017         struct proc *p = td->td_proc;
 1018         vm_map_t map = &p->p_vmspace->vm_map;
 1019         vm_map_entry_t entry;
 1020 
 1021         for (entry = map->header.next; entry != &map->header;
 1022             entry = entry->next) {
 1023                 vm_object_t obj;
 1024 
 1025                 /*
  1026                  * Don't dump inaccessible mappings; honor legacy
  1027                  * coredump mode (the debug.elf<N>.legacy_coredump sysctl).
 1028                  *
 1029                  * Note that read-only segments related to the elf binary
 1030                  * are marked MAP_ENTRY_NOCOREDUMP now so we no longer
 1031                  * need to arbitrarily ignore such segments.
 1032                  */
 1033                 if (elf_legacy_coredump) {
 1034                         if ((entry->protection & VM_PROT_RW) != VM_PROT_RW)
 1035                                 continue;
 1036                 } else {
 1037                         if ((entry->protection & VM_PROT_ALL) == 0)
 1038                                 continue;
 1039                 }
 1040 
 1041                 /*
  1042                  * Don't include a memory segment in the coredump if
 1043                  * MAP_NOCORE is set in mmap(2) or MADV_NOCORE in
 1044                  * madvise(2).  Do not dump submaps (i.e. parts of the
 1045                  * kernel map).
 1046                  */
 1047                 if (entry->eflags & (MAP_ENTRY_NOCOREDUMP|MAP_ENTRY_IS_SUB_MAP))
 1048                         continue;
 1049 
 1050                 if ((obj = entry->object.vm_object) == NULL)
 1051                         continue;
 1052 
 1053                 /* Find the deepest backing object. */
 1054                 while (obj->backing_object != NULL)
 1055                         obj = obj->backing_object;
 1056 
 1057                 /* Ignore memory-mapped devices and such things. */
 1058                 if (obj->type != OBJT_DEFAULT &&
 1059                     obj->type != OBJT_SWAP &&
 1060                     obj->type != OBJT_VNODE)
 1061                         continue;
 1062 
 1063                 (*func)(entry, closure);
 1064         }
 1065 }
 1066 
 1067 /*
 1068  * Write the core file header to the file, including padding up to
 1069  * the page boundary.
 1070  */
 1071 static int
 1072 __elfN(corehdr)(td, vp, cred, numsegs, hdr, hdrsize)
 1073         struct thread *td;
 1074         struct vnode *vp;
 1075         struct ucred *cred;
 1076         int numsegs;
 1077         size_t hdrsize;
 1078         void *hdr;
 1079 {
 1080         size_t off;
 1081 
 1082         /* Fill in the header. */
 1083         bzero(hdr, hdrsize);
 1084         off = 0;
 1085         __elfN(puthdr)(td, hdr, &off, numsegs);
 1086 
 1087         /* Write it to the core file. */
 1088         return (vn_rdwr_inchunks(UIO_WRITE, vp, hdr, hdrsize, (off_t)0,
 1089             UIO_SYSSPACE, IO_UNIT | IO_DIRECT, cred, NOCRED, NULL,
 1090             td)); /* XXXKSE */
 1091 }
 1092 
 1093 #if defined(COMPAT_IA32) && __ELF_WORD_SIZE == 32
 1094 typedef struct prstatus32 elf_prstatus_t;
 1095 typedef struct prpsinfo32 elf_prpsinfo_t;
 1096 typedef struct fpreg32 elf_prfpregset_t;
 1097 typedef struct fpreg32 elf_fpregset_t;
 1098 typedef struct reg32 elf_gregset_t;
 1099 #else
 1100 typedef prstatus_t elf_prstatus_t;
 1101 typedef prpsinfo_t elf_prpsinfo_t;
 1102 typedef prfpregset_t elf_prfpregset_t;
 1103 typedef prfpregset_t elf_fpregset_t;
 1104 typedef gregset_t elf_gregset_t;
 1105 #endif
 1106 
 1107 static void
 1108 __elfN(puthdr)(struct thread *td, void *dst, size_t *off, int numsegs)
 1109 {
 1110         struct {
 1111                 elf_prstatus_t status;
 1112                 elf_prfpregset_t fpregset;
 1113                 elf_prpsinfo_t psinfo;
 1114         } *tempdata;
 1115         elf_prstatus_t *status;
 1116         elf_prfpregset_t *fpregset;
 1117         elf_prpsinfo_t *psinfo;
 1118         struct proc *p;
 1119         struct thread *thr;
 1120         size_t ehoff, noteoff, notesz, phoff;
 1121 
 1122         p = td->td_proc;
 1123 
 1124         ehoff = *off;
 1125         *off += sizeof(Elf_Ehdr);
 1126 
 1127         phoff = *off;
 1128         *off += (numsegs + 1) * sizeof(Elf_Phdr);
 1129 
 1130         noteoff = *off;
 1131         /*
 1132          * Don't allocate space for the notes if we're just calculating
 1133          * the size of the header. We also don't collect the data.
 1134          */
 1135         if (dst != NULL) {
 1136                 tempdata = malloc(sizeof(*tempdata), M_TEMP, M_ZERO|M_WAITOK);
 1137                 status = &tempdata->status;
 1138                 fpregset = &tempdata->fpregset;
 1139                 psinfo = &tempdata->psinfo;
 1140         } else {
 1141                 tempdata = NULL;
 1142                 status = NULL;
 1143                 fpregset = NULL;
 1144                 psinfo = NULL;
 1145         }
 1146 
 1147         if (dst != NULL) {
 1148                 psinfo->pr_version = PRPSINFO_VERSION;
 1149                 psinfo->pr_psinfosz = sizeof(elf_prpsinfo_t);
 1150                 strlcpy(psinfo->pr_fname, p->p_comm, sizeof(psinfo->pr_fname));
 1151                 /*
 1152                  * XXX - We don't fill in the command line arguments properly
 1153                  * yet.
 1154                  */
 1155                 strlcpy(psinfo->pr_psargs, p->p_comm,
 1156                     sizeof(psinfo->pr_psargs));
 1157         }
 1158         __elfN(putnote)(dst, off, "FreeBSD", NT_PRPSINFO, psinfo,
 1159             sizeof *psinfo);
 1160 
 1161         /*
 1162          * To have the debugger select the right thread (LWP) as the initial
 1163          * thread, we dump the state of the thread passed to us in td first.
  1164          * This is the thread that caused the core dump and is thus
  1165          * likely the thread one wants selected in the debugger.
 1166          */
 1167         thr = td;
 1168         while (thr != NULL) {
 1169                 if (dst != NULL) {
 1170                         status->pr_version = PRSTATUS_VERSION;
 1171                         status->pr_statussz = sizeof(elf_prstatus_t);
 1172                         status->pr_gregsetsz = sizeof(elf_gregset_t);
 1173                         status->pr_fpregsetsz = sizeof(elf_fpregset_t);
 1174                         status->pr_osreldate = osreldate;
 1175                         status->pr_cursig = p->p_sig;
 1176                         status->pr_pid = thr->td_tid;
 1177 #if defined(COMPAT_IA32) && __ELF_WORD_SIZE == 32
 1178                         fill_regs32(thr, &status->pr_reg);
 1179                         fill_fpregs32(thr, fpregset);
 1180 #else
 1181                         fill_regs(thr, &status->pr_reg);
 1182                         fill_fpregs(thr, fpregset);
 1183 #endif
 1184                 }
 1185                 __elfN(putnote)(dst, off, "FreeBSD", NT_PRSTATUS, status,
 1186                     sizeof *status);
 1187                 __elfN(putnote)(dst, off, "FreeBSD", NT_FPREGSET, fpregset,
 1188                     sizeof *fpregset);
 1189                 /*
 1190                  * Allow for MD specific notes, as well as any MD
 1191                  * specific preparations for writing MI notes.
 1192                  */
 1193                 __elfN(dump_thread)(thr, dst, off);
 1194 
 1195                 thr = (thr == td) ? TAILQ_FIRST(&p->p_threads) :
 1196                     TAILQ_NEXT(thr, td_plist);
 1197                 if (thr == td)
 1198                         thr = TAILQ_NEXT(thr, td_plist);
 1199         }
 1200 
 1201         notesz = *off - noteoff;
 1202 
 1203         if (dst != NULL)
 1204                 free(tempdata, M_TEMP);
 1205 
 1206         /* Align up to a page boundary for the program segments. */
 1207         *off = round_page(*off);
 1208 
 1209         if (dst != NULL) {
 1210                 Elf_Ehdr *ehdr;
 1211                 Elf_Phdr *phdr;
 1212                 struct phdr_closure phc;
 1213 
 1214                 /*
 1215                  * Fill in the ELF header.
 1216                  */
 1217                 ehdr = (Elf_Ehdr *)((char *)dst + ehoff);
 1218                 ehdr->e_ident[EI_MAG0] = ELFMAG0;
 1219                 ehdr->e_ident[EI_MAG1] = ELFMAG1;
 1220                 ehdr->e_ident[EI_MAG2] = ELFMAG2;
 1221                 ehdr->e_ident[EI_MAG3] = ELFMAG3;
 1222                 ehdr->e_ident[EI_CLASS] = ELF_CLASS;
 1223                 ehdr->e_ident[EI_DATA] = ELF_DATA;
 1224                 ehdr->e_ident[EI_VERSION] = EV_CURRENT;
 1225                 ehdr->e_ident[EI_OSABI] = ELFOSABI_FREEBSD;
 1226                 ehdr->e_ident[EI_ABIVERSION] = 0;
 1227                 ehdr->e_ident[EI_PAD] = 0;
 1228                 ehdr->e_type = ET_CORE;
 1229 #if defined(COMPAT_IA32) && __ELF_WORD_SIZE == 32
 1230                 ehdr->e_machine = EM_386;
 1231 #else
 1232                 ehdr->e_machine = ELF_ARCH;
 1233 #endif
 1234                 ehdr->e_version = EV_CURRENT;
 1235                 ehdr->e_entry = 0;
 1236                 ehdr->e_phoff = phoff;
 1237                 ehdr->e_flags = 0;
 1238                 ehdr->e_ehsize = sizeof(Elf_Ehdr);
 1239                 ehdr->e_phentsize = sizeof(Elf_Phdr);
 1240                 ehdr->e_phnum = numsegs + 1;
 1241                 ehdr->e_shentsize = sizeof(Elf_Shdr);
 1242                 ehdr->e_shnum = 0;
 1243                 ehdr->e_shstrndx = SHN_UNDEF;
 1244 
 1245                 /*
 1246                  * Fill in the program header entries.
 1247                  */
 1248                 phdr = (Elf_Phdr *)((char *)dst + phoff);
 1249 
  1250                 /* The note segment. */
 1251                 phdr->p_type = PT_NOTE;
 1252                 phdr->p_offset = noteoff;
 1253                 phdr->p_vaddr = 0;
 1254                 phdr->p_paddr = 0;
 1255                 phdr->p_filesz = notesz;
 1256                 phdr->p_memsz = 0;
 1257                 phdr->p_flags = 0;
 1258                 phdr->p_align = 0;
 1259                 phdr++;
 1260 
 1261                 /* All the writable segments from the program. */
 1262                 phc.phdr = phdr;
 1263                 phc.offset = *off;
 1264                 each_writable_segment(td, cb_put_phdr, &phc);
 1265         }
 1266 }
 1267 
 1268 static void
 1269 __elfN(putnote)(void *dst, size_t *off, const char *name, int type,
 1270     const void *desc, size_t descsz)
 1271 {
 1272         Elf_Note note;
 1273 
 1274         note.n_namesz = strlen(name) + 1;
 1275         note.n_descsz = descsz;
 1276         note.n_type = type;
 1277         if (dst != NULL)
 1278                 bcopy(&note, (char *)dst + *off, sizeof note);
 1279         *off += sizeof note;
 1280         if (dst != NULL)
 1281                 bcopy(name, (char *)dst + *off, note.n_namesz);
 1282         *off += roundup2(note.n_namesz, sizeof(Elf_Size));
 1283         if (dst != NULL)
 1284                 bcopy(desc, (char *)dst + *off, note.n_descsz);
 1285         *off += roundup2(note.n_descsz, sizeof(Elf_Size));
 1286 }
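      /*
       * On-disk record produced above (editor's note): an Elf_Note header
       * followed by the name and then the descriptor, each padded out to
       * an Elf_Size boundary.  For the NT_PRSTATUS notes that puthdr()
       * emits this is:
       *
       *      n_namesz = 8                    "FreeBSD" plus NUL
       *      n_descsz = sizeof(elf_prstatus_t)
       *      n_type   = NT_PRSTATUS
       *      "FreeBSD\0" + padding
       *      elf_prstatus_t payload + padding
       */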
 1287 
 1288 /*
 1289  * Tell kern_execve.c about it, with a little help from the linker.
 1290  */
 1291 static struct execsw __elfN(execsw) = {
 1292         __CONCAT(exec_, __elfN(imgact)),
 1293         __XSTRING(__CONCAT(ELF, __ELF_WORD_SIZE))
 1294 };
 1295 EXEC_SET(__CONCAT(elf, __ELF_WORD_SIZE), __elfN(execsw));
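      /*
       * Editor's note: EXEC_SET() hooks this activator onto the kernel's
       * execsw list via a SYSINIT, so execve(2) will offer each image to
       * exec_elf32_imgact()/exec_elf64_imgact() in turn.  The bare -1
       * return from the activator above means "not my format, try the
       * next activator" (e.g. #! scripts); a positive errno aborts the
       * exec instead.
       */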
