FreeBSD/Linux Kernel Cross Reference
sys/kern/imgact_elf.c

/*-
 * Copyright (c) 2000 David O'Brien
 * Copyright (c) 1995-1996 Søren Schmidt
 * Copyright (c) 1996 Peter Wemm
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer
 *    in this position and unchanged.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. The name of the author may not be used to endorse or promote products
 *    derived from this software without specific prior written permission
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
 * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
 * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
 * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */

#include <sys/cdefs.h>
__FBSDID("$FreeBSD: releng/6.0/sys/kern/imgact_elf.c 150271 2005-09-18 03:31:35Z csjp $");

#include "opt_compat.h"

#include <sys/param.h>
#include <sys/exec.h>
#include <sys/fcntl.h>
#include <sys/imgact.h>
#include <sys/imgact_elf.h>
#include <sys/kernel.h>
#include <sys/lock.h>
#include <sys/malloc.h>
#include <sys/mount.h>
#include <sys/mutex.h>
#include <sys/mman.h>
#include <sys/namei.h>
#include <sys/pioctl.h>
#include <sys/proc.h>
#include <sys/procfs.h>
#include <sys/resourcevar.h>
#include <sys/systm.h>
#include <sys/signalvar.h>
#include <sys/stat.h>
#include <sys/sx.h>
#include <sys/syscall.h>
#include <sys/sysctl.h>
#include <sys/sysent.h>
#include <sys/vnode.h>

#include <vm/vm.h>
#include <vm/vm_kern.h>
#include <vm/vm_param.h>
#include <vm/pmap.h>
#include <vm/vm_map.h>
#include <vm/vm_object.h>
#include <vm/vm_extern.h>

#include <machine/elf.h>
#include <machine/md_var.h>

#if defined(COMPAT_IA32) && __ELF_WORD_SIZE == 32
#include <machine/fpu.h>
#include <compat/ia32/ia32_reg.h>
#endif

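/*
 * Descriptive note: FreeBSD 3.x branded binaries by writing a brand
 * string (e.g. "FreeBSD") directly into e_ident starting at this
 * offset, predating the standard EI_OSABI byte; see the
 * compat_3_brand match in get_brandinfo() below.
 */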
#define OLD_EI_BRAND    8

static int __elfN(check_header)(const Elf_Ehdr *hdr);
static Elf_Brandinfo *__elfN(get_brandinfo)(const Elf_Ehdr *hdr,
    const char *interp);
static int __elfN(load_file)(struct proc *p, const char *file, u_long *addr,
    u_long *entry, size_t pagesize);
static int __elfN(load_section)(struct proc *p,
    struct vmspace *vmspace, struct vnode *vp, vm_object_t object,
    vm_offset_t offset, caddr_t vmaddr, size_t memsz, size_t filsz,
    vm_prot_t prot, size_t pagesize);
static int __CONCAT(exec_, __elfN(imgact))(struct image_params *imgp);

SYSCTL_NODE(_kern, OID_AUTO, __CONCAT(elf, __ELF_WORD_SIZE), CTLFLAG_RW, 0,
    "");

int __elfN(fallback_brand) = -1;
SYSCTL_INT(__CONCAT(_kern_elf, __ELF_WORD_SIZE), OID_AUTO,
    fallback_brand, CTLFLAG_RW, &__elfN(fallback_brand), 0,
    __XSTRING(__CONCAT(ELF, __ELF_WORD_SIZE)) " brand of last resort");
TUNABLE_INT("kern.elf" __XSTRING(__ELF_WORD_SIZE) ".fallback_brand",
    &__elfN(fallback_brand));

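/*
 * Illustrative usage (an example, not the only choice): to make
 * unbranded binaries fall back to the Linux brand, one could set
 *
 *	kern.elf32.fallback_brand=3	# ELFOSABI_LINUX, in loader.conf
 *
 * or run "sysctl kern.elf32.fallback_brand=3" at runtime; the node is
 * named kern.elf64.* in a kernel built with a 64-bit ELF word size.
 */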
static int elf_trace = 0;
SYSCTL_INT(_debug, OID_AUTO, __elfN(trace), CTLFLAG_RW, &elf_trace, 0, "");

static int elf_legacy_coredump = 0;
SYSCTL_INT(_debug, OID_AUTO, __elfN(legacy_coredump), CTLFLAG_RW,
    &elf_legacy_coredump, 0, "");

static Elf_Brandinfo *elf_brand_list[MAX_BRANDS];

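/*
 * ABI brand registration API.  An ABI emulation module registers its
 * Elf_Brandinfo at load time and removes it at unload.  A minimal,
 * hypothetical sketch (my_brand and my_sysvec are placeholders, not
 * names used elsewhere in the tree):
 *
 *	static Elf_Brandinfo my_brand = {
 *		.brand = ELFOSABI_FREEBSD,
 *		.machine = EM_386,
 *		.compat_3_brand = "FreeBSD",
 *		.interp_path = "/libexec/ld-elf.so.1",
 *		.sysvec = &my_sysvec,
 *	};
 *
 *	if (elf32_insert_brand_entry(&my_brand) < 0)
 *		return (EINVAL);
 */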
int
__elfN(insert_brand_entry)(Elf_Brandinfo *entry)
{
        int i;

        for (i = 0; i < MAX_BRANDS; i++) {
                if (elf_brand_list[i] == NULL) {
                        elf_brand_list[i] = entry;
                        break;
                }
        }
        if (i == MAX_BRANDS)
                return (-1);
        return (0);
}

int
__elfN(remove_brand_entry)(Elf_Brandinfo *entry)
{
        int i;

        for (i = 0; i < MAX_BRANDS; i++) {
                if (elf_brand_list[i] == entry) {
                        elf_brand_list[i] = NULL;
                        break;
                }
        }
        if (i == MAX_BRANDS)
                return (-1);
        return (0);
}

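/*
 * Return TRUE if at least one process is still executing under the
 * given brand's sysentvec; used to prevent an ABI emulation module
 * from being unloaded while it is in use.
 */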
int
__elfN(brand_inuse)(Elf_Brandinfo *entry)
{
        struct proc *p;
        int rval = FALSE;

        sx_slock(&allproc_lock);
        LIST_FOREACH(p, &allproc, p_list) {
                if (p->p_sysent == entry->sysvec) {
                        rval = TRUE;
                        break;
                }
        }
        sx_sunlock(&allproc_lock);

        return (rval);
}

static Elf_Brandinfo *
__elfN(get_brandinfo)(const Elf_Ehdr *hdr, const char *interp)
{
        Elf_Brandinfo *bi;
        int i;

        /*
         * We support three types of branding -- (1) the ELF EI_OSABI field
         * that SCO added to the ELF spec, (2) FreeBSD 3.x's traditional string
         * branding within the ELF header, and (3) the path named by the
         * `interp_path' field.  We should also look for an ".note.ABI-tag"
         * ELF section, now present in all Linux ELF binaries, FreeBSD 4.1+,
         * and some NetBSD ones.
         */

        /* If the executable has a brand, search for it in the brand list. */
        for (i = 0; i < MAX_BRANDS; i++) {
                bi = elf_brand_list[i];
                if (bi != NULL && hdr->e_machine == bi->machine &&
                    (hdr->e_ident[EI_OSABI] == bi->brand ||
                    strncmp((const char *)&hdr->e_ident[OLD_EI_BRAND],
                    bi->compat_3_brand, strlen(bi->compat_3_brand)) == 0))
                        return (bi);
        }

        /* Lacking a known brand, search for a recognized interpreter. */
        if (interp != NULL) {
                for (i = 0; i < MAX_BRANDS; i++) {
                        bi = elf_brand_list[i];
                        if (bi != NULL && hdr->e_machine == bi->machine &&
                            strcmp(interp, bi->interp_path) == 0)
                                return (bi);
                }
        }

        /* Lacking a recognized interpreter, try the default brand */
        for (i = 0; i < MAX_BRANDS; i++) {
                bi = elf_brand_list[i];
                if (bi != NULL && hdr->e_machine == bi->machine &&
                    __elfN(fallback_brand) == bi->brand)
                        return (bi);
        }
        return (NULL);
}

static int
__elfN(check_header)(const Elf_Ehdr *hdr)
{
        Elf_Brandinfo *bi;
        int i;

        if (!IS_ELF(*hdr) ||
            hdr->e_ident[EI_CLASS] != ELF_TARG_CLASS ||
            hdr->e_ident[EI_DATA] != ELF_TARG_DATA ||
            hdr->e_ident[EI_VERSION] != EV_CURRENT ||
            hdr->e_phentsize != sizeof(Elf_Phdr) ||
            hdr->e_version != ELF_TARG_VER)
                return (ENOEXEC);

        /*
         * Make sure we have at least one brand for this machine.
         */

        for (i = 0; i < MAX_BRANDS; i++) {
                bi = elf_brand_list[i];
                if (bi != NULL && bi->machine == hdr->e_machine)
                        break;
        }
        if (i == MAX_BRANDS)
                return (ENOEXEC);

        return (0);
}

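/*
 * Map the portion of a section that does not begin or end on a page
 * boundary.  Such a fragment cannot simply be mapped from the file
 * object, so it is mapped into the kernel's exec_map and copied out
 * to the process address space by hand.
 */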
static int
__elfN(map_partial)(vm_map_t map, vm_object_t object, vm_ooffset_t offset,
        vm_offset_t start, vm_offset_t end, vm_prot_t prot,
        vm_prot_t max)
{
        int error, rv;
        vm_offset_t off;
        vm_offset_t data_buf = 0;

        /*
         * Create the page if it doesn't exist yet. Ignore errors.
         */
        vm_map_lock(map);
        vm_map_insert(map, NULL, 0, trunc_page(start), round_page(end), max,
            max, 0);
        vm_map_unlock(map);

        /*
         * Find the page from the underlying object.
         */
        if (object) {
                vm_object_reference(object);
                rv = vm_map_find(exec_map,
                                 object,
                                 trunc_page(offset),
                                 &data_buf,
                                 PAGE_SIZE,
                                 TRUE,
                                 VM_PROT_READ,
                                 VM_PROT_ALL,
                                 MAP_COPY_ON_WRITE | MAP_PREFAULT_PARTIAL);
                if (rv != KERN_SUCCESS) {
                        vm_object_deallocate(object);
                        return (rv);
                }

                off = offset - trunc_page(offset);
                error = copyout((caddr_t)data_buf + off, (caddr_t)start,
                    end - start);
                vm_map_remove(exec_map, data_buf, data_buf + PAGE_SIZE);
                if (error) {
                        return (KERN_FAILURE);
                }
        }

        return (KERN_SUCCESS);
}

static int
__elfN(map_insert)(vm_map_t map, vm_object_t object, vm_ooffset_t offset,
        vm_offset_t start, vm_offset_t end, vm_prot_t prot,
        vm_prot_t max, int cow)
{
        vm_offset_t data_buf, off;
        vm_size_t sz;
        int error, rv;

        if (start != trunc_page(start)) {
                rv = __elfN(map_partial)(map, object, offset, start,
                    round_page(start), prot, max);
                if (rv)
                        return (rv);
                offset += round_page(start) - start;
                start = round_page(start);
        }
        if (end != round_page(end)) {
                rv = __elfN(map_partial)(map, object, offset +
                    trunc_page(end) - start, trunc_page(end), end, prot, max);
                if (rv)
                        return (rv);
                end = trunc_page(end);
        }
        if (end > start) {
                if (offset & PAGE_MASK) {
                        /*
                         * The mapping is not page aligned. This means we have
                         * to copy the data. Sigh.
                         */
                        rv = vm_map_find(map, 0, 0, &start, end - start,
                            FALSE, prot, max, 0);
                        if (rv)
                                return (rv);
                        data_buf = 0;
                        while (start < end) {
                                vm_object_reference(object);
                                rv = vm_map_find(exec_map,
                                                 object,
                                                 trunc_page(offset),
                                                 &data_buf,
                                                 2 * PAGE_SIZE,
                                                 TRUE,
                                                 VM_PROT_READ,
                                                 VM_PROT_ALL,
                                                 (MAP_COPY_ON_WRITE
                                                  | MAP_PREFAULT_PARTIAL));
                                if (rv != KERN_SUCCESS) {
                                        vm_object_deallocate(object);
                                        return (rv);
                                }
                                off = offset - trunc_page(offset);
                                sz = end - start;
                                if (sz > PAGE_SIZE)
                                        sz = PAGE_SIZE;
                                error = copyout((caddr_t)data_buf + off,
                                    (caddr_t)start, sz);
                                vm_map_remove(exec_map, data_buf,
                                    data_buf + 2 * PAGE_SIZE);
                                if (error) {
                                        return (KERN_FAILURE);
                                }
                                start += sz;
                        }
                        rv = KERN_SUCCESS;
                } else {
                        vm_map_lock(map);
                        rv = vm_map_insert(map, object, offset, start, end,
                            prot, max, cow);
                        vm_map_unlock(map);
                }
                return (rv);
        } else {
                return (KERN_SUCCESS);
        }
}

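/*
 * Map a single PT_LOAD segment into the process address space: the
 * file-backed part is inserted copy-on-write, and any bss portion
 * (memsz > filsz) is backed by anonymous memory, with the trailing
 * partial page of initialized data copied in by hand.
 */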
static int
__elfN(load_section)(struct proc *p, struct vmspace *vmspace,
        struct vnode *vp, vm_object_t object, vm_offset_t offset,
        caddr_t vmaddr, size_t memsz, size_t filsz, vm_prot_t prot,
        size_t pagesize)
{
        size_t map_len;
        vm_offset_t map_addr;
        int error, rv, cow;
        size_t copy_len;
        vm_offset_t file_addr;
        vm_offset_t data_buf = 0;

        error = 0;

        /*
         * It's necessary to fail if the filsz + offset taken from the
         * header is greater than the actual file pager object's size.
         * If we were to allow this, then the vm_map_find() below would
         * walk right off the end of the file object and into the ether.
         *
         * While I'm here, might as well check for something else that
         * is invalid: filsz cannot be greater than memsz.
         */
        if ((off_t)filsz + offset > object->un_pager.vnp.vnp_size ||
            filsz > memsz) {
                uprintf("elf_load_section: truncated ELF file\n");
                return (ENOEXEC);
        }

#define trunc_page_ps(va, ps)   ((va) & ~(ps - 1))
#define round_page_ps(va, ps)   (((va) + (ps - 1)) & ~(ps - 1))
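
/*
 * Worked example: with ps = 0x1000 (a 4KB page),
 * trunc_page_ps(0x12345, 0x1000) == 0x12000 and
 * round_page_ps(0x12345, 0x1000) == 0x13000; ps must be a power of 2.
 */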

        map_addr = trunc_page_ps((vm_offset_t)vmaddr, pagesize);
        file_addr = trunc_page_ps(offset, pagesize);

        /*
         * We have two choices.  We can either clear the data in the last page
         * of an oversized mapping, or we can start the anon mapping a page
         * early and copy the initialized data into that first page.  We
         * choose the second.
         */
        if (memsz > filsz)
                map_len = trunc_page_ps(offset + filsz, pagesize) - file_addr;
        else
                map_len = round_page_ps(offset + filsz, pagesize) - file_addr;

        if (map_len != 0) {
                vm_object_reference(object);

                /* cow flags: don't dump readonly sections in core */
                cow = MAP_COPY_ON_WRITE | MAP_PREFAULT |
                    (prot & VM_PROT_WRITE ? 0 : MAP_DISABLE_COREDUMP);

                rv = __elfN(map_insert)(&vmspace->vm_map,
                                      object,
                                      file_addr,        /* file offset */
                                      map_addr,         /* virtual start */
                                      map_addr + map_len,/* virtual end */
                                      prot,
                                      VM_PROT_ALL,
                                      cow);
                if (rv != KERN_SUCCESS) {
                        vm_object_deallocate(object);
                        return (EINVAL);
                }

                /* we can stop now if we've covered it all */
                if (memsz == filsz) {
                        return (0);
                }
        }

        /*
         * We have to get the remaining bit of the file into the first part
         * of the oversized map segment.  This is normally because the .data
         * segment in the file is extended to provide bss.  It's a neat idea
         * to try and save a page, but it's a pain in the behind to implement.
         */
        copy_len = (offset + filsz) - trunc_page_ps(offset + filsz, pagesize);
        map_addr = trunc_page_ps((vm_offset_t)vmaddr + filsz, pagesize);
        map_len = round_page_ps((vm_offset_t)vmaddr + memsz, pagesize) -
            map_addr;

        /* This had damn well better be true! */
        if (map_len != 0) {
                rv = __elfN(map_insert)(&vmspace->vm_map, NULL, 0, map_addr,
                    map_addr + map_len, VM_PROT_ALL, VM_PROT_ALL, 0);
                if (rv != KERN_SUCCESS) {
                        return (EINVAL);
                }
        }

        if (copy_len != 0) {
                vm_offset_t off;
                vm_object_reference(object);
                rv = vm_map_find(exec_map,
                                 object,
                                 trunc_page(offset + filsz),
                                 &data_buf,
                                 PAGE_SIZE,
                                 TRUE,
                                 VM_PROT_READ,
                                 VM_PROT_ALL,
                                 MAP_COPY_ON_WRITE | MAP_PREFAULT_PARTIAL);
                if (rv != KERN_SUCCESS) {
                        vm_object_deallocate(object);
                        return (EINVAL);
                }

                /* send the page fragment to user space */
                off = trunc_page_ps(offset + filsz, pagesize) -
                    trunc_page(offset + filsz);
                error = copyout((caddr_t)data_buf + off, (caddr_t)map_addr,
                    copy_len);
                vm_map_remove(exec_map, data_buf, data_buf + PAGE_SIZE);
                if (error) {
                        return (error);
                }
        }

        /*
         * Finally, set the mapping to the specified protection.
         * XXX we had better undo the damage from pasting over the cracks here!
         */
        vm_map_protect(&vmspace->vm_map, trunc_page(map_addr),
            round_page(map_addr + map_len), prot, FALSE);

        return (error);
}

/*
 * Load the file "file" into memory.  It may be either a shared object
 * or an executable.
 *
 * The "addr" reference parameter is in/out.  On entry, it specifies
 * the address where a shared object should be loaded.  If the file is
 * an executable, this value is ignored.  On exit, "addr" specifies
 * where the file was actually loaded.
 *
 * The "entry" reference parameter is out only.  On exit, it specifies
 * the entry point for the loaded file.
 */
static int
__elfN(load_file)(struct proc *p, const char *file, u_long *addr,
        u_long *entry, size_t pagesize)
{
        struct {
                struct nameidata nd;
                struct vattr attr;
                struct image_params image_params;
        } *tempdata;
        const Elf_Ehdr *hdr = NULL;
        const Elf_Phdr *phdr = NULL;
        struct nameidata *nd;
        struct vmspace *vmspace = p->p_vmspace;
        struct vattr *attr;
        struct image_params *imgp;
        vm_prot_t prot;
        u_long rbase;
        u_long base_addr = 0;
        int vfslocked, error, i, numsegs;

        if (curthread->td_proc != p)
                panic("elf_load_file - thread");        /* XXXKSE DIAGNOSTIC */

        tempdata = malloc(sizeof(*tempdata), M_TEMP, M_WAITOK);
        nd = &tempdata->nd;
        attr = &tempdata->attr;
        imgp = &tempdata->image_params;

        /*
         * Initialize part of the common data
         */
        imgp->proc = p;
        imgp->attr = attr;
        imgp->firstpage = NULL;
        imgp->image_header = NULL;
        imgp->object = NULL;
        imgp->execlabel = NULL;

        /* XXXKSE */
        NDINIT(nd, LOOKUP, MPSAFE|LOCKLEAF|FOLLOW, UIO_SYSSPACE, file,
            curthread);
        vfslocked = 0;
        if ((error = namei(nd)) != 0) {
                nd->ni_vp = NULL;
                goto fail;
        }
        vfslocked = NDHASGIANT(nd);
        NDFREE(nd, NDF_ONLY_PNBUF);
        imgp->vp = nd->ni_vp;

        /*
         * Check permissions, modes, uid, etc on the file, and "open" it.
         */
        error = exec_check_permissions(imgp);
        if (error) {
                VOP_UNLOCK(nd->ni_vp, 0, curthread); /* XXXKSE */
                goto fail;
        }

        error = exec_map_first_page(imgp);
        /*
         * Also mark the interpreter's vnode as a text file (VV_TEXT), so
         * that it cannot be modified while it is being executed.
         */
        if (error == 0)
                nd->ni_vp->v_vflag |= VV_TEXT;

        imgp->object = nd->ni_vp->v_object;
        vm_object_reference(imgp->object);

        VOP_UNLOCK(nd->ni_vp, 0, curthread); /* XXXKSE */
        if (error)
                goto fail;

        hdr = (const Elf_Ehdr *)imgp->image_header;
        if ((error = __elfN(check_header)(hdr)) != 0)
                goto fail;
        if (hdr->e_type == ET_DYN)
                rbase = *addr;
        else if (hdr->e_type == ET_EXEC)
                rbase = 0;
        else {
                error = ENOEXEC;
                goto fail;
        }

        /* Only support headers that fit within first page for now      */
        /*    (multiplication of two Elf_Half fields will not overflow) */
        if ((hdr->e_phoff > PAGE_SIZE) ||
            (hdr->e_phentsize * hdr->e_phnum) > PAGE_SIZE - hdr->e_phoff) {
                error = ENOEXEC;
                goto fail;
        }

        phdr = (const Elf_Phdr *)(imgp->image_header + hdr->e_phoff);

        for (i = 0, numsegs = 0; i < hdr->e_phnum; i++) {
                if (phdr[i].p_type == PT_LOAD) {        /* Loadable segment */
                        prot = 0;
                        if (phdr[i].p_flags & PF_X)
                                prot |= VM_PROT_EXECUTE;
                        if (phdr[i].p_flags & PF_W)
                                prot |= VM_PROT_WRITE;
                        if (phdr[i].p_flags & PF_R)
                                prot |= VM_PROT_READ;

                        if ((error = __elfN(load_section)(p, vmspace,
                            nd->ni_vp, imgp->object, phdr[i].p_offset,
                            (caddr_t)(uintptr_t)phdr[i].p_vaddr + rbase,
                            phdr[i].p_memsz, phdr[i].p_filesz, prot,
                            pagesize)) != 0)
                                goto fail;
                        /*
                         * Establish the base address if this is the
                         * first segment.
                         */
                        if (numsegs == 0)
                                base_addr = trunc_page(phdr[i].p_vaddr +
                                    rbase);
                        numsegs++;
                }
        }
        *addr = base_addr;
        *entry = (unsigned long)hdr->e_entry + rbase;

fail:
        if (imgp->firstpage)
                exec_unmap_first_page(imgp);
        if (imgp->object)
                vm_object_deallocate(imgp->object);

        if (nd->ni_vp)
                vrele(nd->ni_vp);

        VFS_UNLOCK_GIANT(vfslocked);
        free(tempdata, M_TEMP);

        return (error);
}

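/*
 * The ELF image activator.  exec_execve() tries each registered
 * execsw entry in turn; returning -1 means "not an ELF image of this
 * class, let another activator try", while any errno return claims
 * the image and aborts the exec with that error.
 */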
static int
__CONCAT(exec_, __elfN(imgact))(struct image_params *imgp)
{
        const Elf_Ehdr *hdr = (const Elf_Ehdr *)imgp->image_header;
        const Elf_Phdr *phdr;
        Elf_Auxargs *elf_auxargs = NULL;
        struct vmspace *vmspace;
        vm_prot_t prot;
        u_long text_size = 0, data_size = 0, total_size = 0;
        u_long text_addr = 0, data_addr = 0;
        u_long seg_size, seg_addr;
        u_long addr, entry = 0, proghdr = 0;
        int error = 0, i;
        const char *interp = NULL;
        Elf_Brandinfo *brand_info;
        char *path;
        struct thread *td = curthread;
        struct sysentvec *sv;

        /*
         * Do we have a valid ELF header ?
         */
        if (__elfN(check_header)(hdr) != 0 || hdr->e_type != ET_EXEC)
                return (-1);

        /*
         * From here on down, we return an errno, not -1, as we've
         * detected an ELF file.
         */

        if ((hdr->e_phoff > PAGE_SIZE) ||
            (hdr->e_phoff + hdr->e_phentsize * hdr->e_phnum) > PAGE_SIZE) {
                /* Only support headers in first page for now */
                return (ENOEXEC);
        }
        phdr = (const Elf_Phdr *)(imgp->image_header + hdr->e_phoff);

        /*
         * From this point on, we may have resources that need to be freed.
         */

        VOP_UNLOCK(imgp->vp, 0, td);

        for (i = 0; i < hdr->e_phnum; i++) {
                switch (phdr[i].p_type) {
                case PT_INTERP: /* Path to interpreter */
                        if (phdr[i].p_filesz > MAXPATHLEN ||
                            phdr[i].p_offset + phdr[i].p_filesz > PAGE_SIZE) {
                                error = ENOEXEC;
                                goto fail;
                        }
                        interp = imgp->image_header + phdr[i].p_offset;
                        break;
                default:
                        break;
                }
        }

        brand_info = __elfN(get_brandinfo)(hdr, interp);
        if (brand_info == NULL) {
                uprintf("ELF binary type \"%u\" not known.\n",
                    hdr->e_ident[EI_OSABI]);
                error = ENOEXEC;
                goto fail;
        }
        sv = brand_info->sysvec;
        if (interp != NULL && brand_info->interp_newpath != NULL)
                interp = brand_info->interp_newpath;

        exec_new_vmspace(imgp, sv);

        vmspace = imgp->proc->p_vmspace;

        for (i = 0; i < hdr->e_phnum; i++) {
                switch (phdr[i].p_type) {
                case PT_LOAD:   /* Loadable segment */
                        prot = 0;
                        if (phdr[i].p_flags & PF_X)
                                prot |= VM_PROT_EXECUTE;
                        if (phdr[i].p_flags & PF_W)
                                prot |= VM_PROT_WRITE;
                        if (phdr[i].p_flags & PF_R)
                                prot |= VM_PROT_READ;

#if defined(__ia64__) && __ELF_WORD_SIZE == 32 && defined(IA32_ME_HARDER)
                        /*
                         * Some x86 binaries assume read == executable,
                         * notably the M3 runtime and therefore cvsup
                         */
                        if (prot & VM_PROT_READ)
                                prot |= VM_PROT_EXECUTE;
#endif

                        if ((error = __elfN(load_section)(imgp->proc, vmspace,
                            imgp->vp, imgp->object, phdr[i].p_offset,
                            (caddr_t)(uintptr_t)phdr[i].p_vaddr,
                            phdr[i].p_memsz, phdr[i].p_filesz, prot,
                            sv->sv_pagesize)) != 0)
                                goto fail;

                        /*
                         * If this segment contains the program headers,
                         * remember their virtual address for the AT_PHDR
                         * aux entry. Static binaries don't usually include
                         * a PT_PHDR entry.
                         */
                        if (phdr[i].p_offset == 0 &&
                            hdr->e_phoff + hdr->e_phnum * hdr->e_phentsize
                                <= phdr[i].p_filesz)
                                proghdr = phdr[i].p_vaddr + hdr->e_phoff;

                        seg_addr = trunc_page(phdr[i].p_vaddr);
                        seg_size = round_page(phdr[i].p_memsz +
                            phdr[i].p_vaddr - seg_addr);

                        /*
                         * Is this .text or .data?  We can't use
                         * VM_PROT_WRITE or VM_PROT_EXEC, it breaks the
                         * alpha terribly and possibly does other bad
                         * things so we stick to the old way of figuring
                         * it out:  If the segment contains the program
                         * entry point, it's a text segment, otherwise it
                         * is a data segment.
                         *
                         * Note that obreak() assumes that data_addr +
                         * data_size == end of data load area, and the ELF
                         * file format expects segments to be sorted by
                         * address.  If multiple data segments exist, the
                         * last one will be used.
                         */
                        if (hdr->e_entry >= phdr[i].p_vaddr &&
                            hdr->e_entry < (phdr[i].p_vaddr +
                            phdr[i].p_memsz)) {
                                text_size = seg_size;
                                text_addr = seg_addr;
                                entry = (u_long)hdr->e_entry;
                        } else {
                                data_size = seg_size;
                                data_addr = seg_addr;
                        }
                        total_size += seg_size;
                        break;
                case PT_PHDR:   /* Program header table info */
                        proghdr = phdr[i].p_vaddr;
                        break;
                default:
                        break;
                }
        }

        if (data_addr == 0 && data_size == 0) {
                data_addr = text_addr;
                data_size = text_size;
        }

        /*
         * Check limits.  It should be safe to check the
         * limits after loading the segments, since we do
         * not actually fault in all the segments' pages.
         */
        PROC_LOCK(imgp->proc);
        if (data_size > lim_cur(imgp->proc, RLIMIT_DATA) ||
            text_size > maxtsiz ||
            total_size > lim_cur(imgp->proc, RLIMIT_VMEM)) {
                PROC_UNLOCK(imgp->proc);
                error = ENOMEM;
                goto fail;
        }

        vmspace->vm_tsize = text_size >> PAGE_SHIFT;
        vmspace->vm_taddr = (caddr_t)(uintptr_t)text_addr;
        vmspace->vm_dsize = data_size >> PAGE_SHIFT;
        vmspace->vm_daddr = (caddr_t)(uintptr_t)data_addr;

        /*
         * We load the dynamic linker where a userland call
         * to mmap(0, ...) would put it.  The rationale behind this
         * calculation is that it leaves room for the heap to grow to
         * its maximum allowed size.
         */
        addr = round_page((vm_offset_t)imgp->proc->p_vmspace->vm_daddr +
            lim_max(imgp->proc, RLIMIT_DATA));
        PROC_UNLOCK(imgp->proc);

        imgp->entry_addr = entry;

        imgp->proc->p_sysent = sv;
        if (interp != NULL && brand_info->emul_path != NULL &&
            brand_info->emul_path[0] != '\0') {
                path = malloc(MAXPATHLEN, M_TEMP, M_WAITOK);
                snprintf(path, MAXPATHLEN, "%s%s", brand_info->emul_path,
                    interp);
                error = __elfN(load_file)(imgp->proc, path, &addr,
                    &imgp->entry_addr, sv->sv_pagesize);
                free(path, M_TEMP);
                if (error == 0)
                        interp = NULL;
        }
        if (interp != NULL) {
                error = __elfN(load_file)(imgp->proc, interp, &addr,
                    &imgp->entry_addr, sv->sv_pagesize);
                if (error != 0) {
                        uprintf("ELF interpreter %s not found\n", interp);
                        goto fail;
                }
        }

        /*
         * Construct auxargs table (used by the fixup routine)
         */
        elf_auxargs = malloc(sizeof(Elf_Auxargs), M_TEMP, M_WAITOK);
        elf_auxargs->execfd = -1;
        elf_auxargs->phdr = proghdr;
        elf_auxargs->phent = hdr->e_phentsize;
        elf_auxargs->phnum = hdr->e_phnum;
        elf_auxargs->pagesz = PAGE_SIZE;
        elf_auxargs->base = addr;
        elf_auxargs->flags = 0;
        elf_auxargs->entry = entry;
        elf_auxargs->trace = elf_trace;

        imgp->auxargs = elf_auxargs;
        imgp->interpreted = 0;

fail:
        vn_lock(imgp->vp, LK_EXCLUSIVE | LK_RETRY, td);
        return (error);
}

#define suword __CONCAT(suword, __ELF_WORD_SIZE)

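/*
 * Fix up the stack of a freshly exec'ed ELF process: the auxiliary
 * vector is written just past the argv/envp pointer arrays, and argc
 * is pushed below the argv pointers.  The resulting layout, from
 * *stack_base upward, is roughly:
 *
 *	argc | argv[0..argc-1] | NULL | envp[0..envc-1] | NULL | auxv
 */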
int
__elfN(freebsd_fixup)(register_t **stack_base, struct image_params *imgp)
{
        Elf_Auxargs *args = (Elf_Auxargs *)imgp->auxargs;
        Elf_Addr *base;
        Elf_Addr *pos;

        base = (Elf_Addr *)*stack_base;
        pos = base + (imgp->args->argc + imgp->args->envc + 2);

        if (args->trace) {
                AUXARGS_ENTRY(pos, AT_DEBUG, 1);
        }
        if (args->execfd != -1) {
                AUXARGS_ENTRY(pos, AT_EXECFD, args->execfd);
        }
        AUXARGS_ENTRY(pos, AT_PHDR, args->phdr);
        AUXARGS_ENTRY(pos, AT_PHENT, args->phent);
        AUXARGS_ENTRY(pos, AT_PHNUM, args->phnum);
        AUXARGS_ENTRY(pos, AT_PAGESZ, args->pagesz);
        AUXARGS_ENTRY(pos, AT_FLAGS, args->flags);
        AUXARGS_ENTRY(pos, AT_ENTRY, args->entry);
        AUXARGS_ENTRY(pos, AT_BASE, args->base);
        AUXARGS_ENTRY(pos, AT_NULL, 0);

        free(imgp->auxargs, M_TEMP);
        imgp->auxargs = NULL;

        base--;
        suword(base, (long)imgp->args->argc);
        *stack_base = (register_t *)base;
        return (0);
}

/*
 * Code for generating ELF core dumps.
 */

typedef void (*segment_callback)(vm_map_entry_t, void *);

/* Closure for cb_put_phdr(). */
struct phdr_closure {
        Elf_Phdr *phdr;         /* Program header to fill in */
        Elf_Off offset;         /* Offset of segment in core file */
};

/* Closure for cb_size_segment(). */
struct sseg_closure {
        int count;              /* Count of writable segments. */
        size_t size;            /* Total size of all writable segments. */
};

static void cb_put_phdr(vm_map_entry_t, void *);
static void cb_size_segment(vm_map_entry_t, void *);
static void each_writable_segment(struct thread *, segment_callback, void *);
static int __elfN(corehdr)(struct thread *, struct vnode *, struct ucred *,
    int, void *, size_t);
static void __elfN(puthdr)(struct thread *, void *, size_t *, int);
static void __elfN(putnote)(void *, size_t *, const char *, int,
    const void *, size_t);

extern int osreldate;

int
__elfN(coredump)(struct thread *td, struct vnode *vp, off_t limit)
{
        struct ucred *cred = td->td_ucred;
        int error = 0;
        struct sseg_closure seginfo;
        void *hdr;
        size_t hdrsize;

        /* Size the program segments. */
        seginfo.count = 0;
        seginfo.size = 0;
        each_writable_segment(td, cb_size_segment, &seginfo);

        /*
         * Calculate the size of the core file header area by making
         * a dry run of generating it.  Nothing is written, but the
         * size is calculated.
         */
        hdrsize = 0;
        __elfN(puthdr)(td, (void *)NULL, &hdrsize, seginfo.count);

        if (hdrsize + seginfo.size >= limit)
                return (EFAULT);

        /*
         * Allocate memory for building the header, fill it up,
         * and write it out.
         */
        hdr = malloc(hdrsize, M_TEMP, M_WAITOK);
        if (hdr == NULL) {
                return (EINVAL);
        }
        error = __elfN(corehdr)(td, vp, cred, seginfo.count, hdr, hdrsize);

        /* Write the contents of all of the writable segments. */
        if (error == 0) {
                Elf_Phdr *php;
                off_t offset;
                int i;

                php = (Elf_Phdr *)((char *)hdr + sizeof(Elf_Ehdr)) + 1;
                offset = hdrsize;
                for (i = 0; i < seginfo.count; i++) {
                        error = vn_rdwr_inchunks(UIO_WRITE, vp,
                            (caddr_t)(uintptr_t)php->p_vaddr,
                            php->p_filesz, offset, UIO_USERSPACE,
                            IO_UNIT | IO_DIRECT, cred, NOCRED, NULL,
                            curthread); /* XXXKSE */
                        if (error != 0)
                                break;
                        offset += php->p_filesz;
                        php++;
                }
        }
        free(hdr, M_TEMP);

        return (error);
}

/*
 * A callback for each_writable_segment() to write out the segment's
 * program header entry.
 */
static void
cb_put_phdr(vm_map_entry_t entry, void *closure)
{
        struct phdr_closure *phc = (struct phdr_closure *)closure;
        Elf_Phdr *phdr = phc->phdr;

        phc->offset = round_page(phc->offset);

        phdr->p_type = PT_LOAD;
        phdr->p_offset = phc->offset;
        phdr->p_vaddr = entry->start;
        phdr->p_paddr = 0;
        phdr->p_filesz = phdr->p_memsz = entry->end - entry->start;
        phdr->p_align = PAGE_SIZE;
        phdr->p_flags = 0;
        if (entry->protection & VM_PROT_READ)
                phdr->p_flags |= PF_R;
        if (entry->protection & VM_PROT_WRITE)
                phdr->p_flags |= PF_W;
        if (entry->protection & VM_PROT_EXECUTE)
                phdr->p_flags |= PF_X;

        phc->offset += phdr->p_filesz;
        phc->phdr++;
}

/*
 * A callback for each_writable_segment() to gather information about
 * the number of segments and their total size.
 */
static void
cb_size_segment(vm_map_entry_t entry, void *closure)
{
        struct sseg_closure *ssc = (struct sseg_closure *)closure;

        ssc->count++;
        ssc->size += entry->end - entry->start;
}

/*
 * For each writable segment in the process's memory map, call the given
 * function with a pointer to the map entry and some arbitrary
 * caller-supplied data.
 */
static void
each_writable_segment(struct thread *td, segment_callback func, void *closure)
{
        struct proc *p = td->td_proc;
        vm_map_t map = &p->p_vmspace->vm_map;
        vm_map_entry_t entry;

        for (entry = map->header.next; entry != &map->header;
            entry = entry->next) {
                vm_object_t obj;

                /*
                 * Don't dump inaccessible mappings; handle legacy
                 * coredump mode separately.
                 *
                 * Note that read-only segments belonging to the ELF binary
                 * are now marked MAP_ENTRY_NOCOREDUMP, so we no longer
                 * need to ignore such segments arbitrarily.
                 */
                if (elf_legacy_coredump) {
                        if ((entry->protection & VM_PROT_RW) != VM_PROT_RW)
                                continue;
                } else {
                        if ((entry->protection & VM_PROT_ALL) == 0)
                                continue;
                }

                /*
                 * Don't include a memory segment in the coredump if
                 * MAP_NOCORE was set in mmap(2) or MADV_NOCORE in
                 * madvise(2).  Do not dump submaps (i.e. parts of the
                 * kernel map).
                 */
                if (entry->eflags & (MAP_ENTRY_NOCOREDUMP|MAP_ENTRY_IS_SUB_MAP))
                        continue;

                if ((obj = entry->object.vm_object) == NULL)
                        continue;

                /* Find the deepest backing object. */
                while (obj->backing_object != NULL)
                        obj = obj->backing_object;

                /* Ignore memory-mapped devices and such things. */
                if (obj->type != OBJT_DEFAULT &&
                    obj->type != OBJT_SWAP &&
                    obj->type != OBJT_VNODE)
                        continue;

                (*func)(entry, closure);
        }
}

/*
 * Write the core file header to the file, including padding up to
 * the page boundary.
 */
static int
__elfN(corehdr)(struct thread *td, struct vnode *vp, struct ucred *cred,
    int numsegs, void *hdr, size_t hdrsize)
{
        size_t off;

        /* Fill in the header. */
        bzero(hdr, hdrsize);
        off = 0;
        __elfN(puthdr)(td, hdr, &off, numsegs);

        /* Write it to the core file. */
        return (vn_rdwr_inchunks(UIO_WRITE, vp, hdr, hdrsize, (off_t)0,
            UIO_SYSSPACE, IO_UNIT | IO_DIRECT, cred, NOCRED, NULL,
            td)); /* XXXKSE */
}

#if defined(COMPAT_IA32) && __ELF_WORD_SIZE == 32
typedef struct prstatus32 elf_prstatus_t;
typedef struct prpsinfo32 elf_prpsinfo_t;
typedef struct fpreg32 elf_prfpregset_t;
typedef struct fpreg32 elf_fpregset_t;
typedef struct reg32 elf_gregset_t;
#else
typedef prstatus_t elf_prstatus_t;
typedef prpsinfo_t elf_prpsinfo_t;
typedef prfpregset_t elf_prfpregset_t;
typedef prfpregset_t elf_fpregset_t;
typedef gregset_t elf_gregset_t;
#endif

static void
__elfN(puthdr)(struct thread *td, void *dst, size_t *off, int numsegs)
{
        struct {
                elf_prstatus_t status;
                elf_prfpregset_t fpregset;
                elf_prpsinfo_t psinfo;
        } *tempdata;
        elf_prstatus_t *status;
        elf_prfpregset_t *fpregset;
        elf_prpsinfo_t *psinfo;
        struct proc *p;
        struct thread *thr;
        size_t ehoff, noteoff, notesz, phoff;

        p = td->td_proc;

        ehoff = *off;
        *off += sizeof(Elf_Ehdr);

        phoff = *off;
        *off += (numsegs + 1) * sizeof(Elf_Phdr);

        noteoff = *off;
        /*
         * Don't allocate space for the notes if we're just calculating
         * the size of the header. We also don't collect the data.
         */
        if (dst != NULL) {
                tempdata = malloc(sizeof(*tempdata), M_TEMP, M_ZERO|M_WAITOK);
                status = &tempdata->status;
                fpregset = &tempdata->fpregset;
                psinfo = &tempdata->psinfo;
        } else {
                tempdata = NULL;
                status = NULL;
                fpregset = NULL;
                psinfo = NULL;
        }

        if (dst != NULL) {
                psinfo->pr_version = PRPSINFO_VERSION;
                psinfo->pr_psinfosz = sizeof(elf_prpsinfo_t);
                strlcpy(psinfo->pr_fname, p->p_comm, sizeof(psinfo->pr_fname));
                /*
                 * XXX - We don't fill in the command line arguments properly
                 * yet.
                 */
                strlcpy(psinfo->pr_psargs, p->p_comm,
                    sizeof(psinfo->pr_psargs));
        }
        __elfN(putnote)(dst, off, "FreeBSD", NT_PRPSINFO, psinfo,
            sizeof *psinfo);

        /*
         * To have the debugger select the right thread (LWP) as the initial
         * thread, we dump the state of the thread passed to us in td first.
         * This is the thread that caused the core dump and is thus likely
         * the one the user wants selected in the debugger.
         */
        thr = td;
        while (thr != NULL) {
                if (dst != NULL) {
                        status->pr_version = PRSTATUS_VERSION;
                        status->pr_statussz = sizeof(elf_prstatus_t);
                        status->pr_gregsetsz = sizeof(elf_gregset_t);
                        status->pr_fpregsetsz = sizeof(elf_fpregset_t);
                        status->pr_osreldate = osreldate;
                        status->pr_cursig = p->p_sig;
                        status->pr_pid = thr->td_tid;
#if defined(COMPAT_IA32) && __ELF_WORD_SIZE == 32
                        fill_regs32(thr, &status->pr_reg);
                        fill_fpregs32(thr, fpregset);
#else
                        fill_regs(thr, &status->pr_reg);
                        fill_fpregs(thr, fpregset);
#endif
                }
                __elfN(putnote)(dst, off, "FreeBSD", NT_PRSTATUS, status,
                    sizeof *status);
                __elfN(putnote)(dst, off, "FreeBSD", NT_FPREGSET, fpregset,
                    sizeof *fpregset);
                /*
                 * Allow for MD specific notes, as well as any MD
                 * specific preparations for writing MI notes.
                 */
                __elfN(dump_thread)(thr, dst, off);

                thr = (thr == td) ? TAILQ_FIRST(&p->p_threads) :
                    TAILQ_NEXT(thr, td_plist);
                if (thr == td)
                        thr = TAILQ_NEXT(thr, td_plist);
        }

        notesz = *off - noteoff;

        if (dst != NULL)
                free(tempdata, M_TEMP);

        /* Align up to a page boundary for the program segments. */
        *off = round_page(*off);

        if (dst != NULL) {
                Elf_Ehdr *ehdr;
                Elf_Phdr *phdr;
                struct phdr_closure phc;

                /*
                 * Fill in the ELF header.
                 */
                ehdr = (Elf_Ehdr *)((char *)dst + ehoff);
                ehdr->e_ident[EI_MAG0] = ELFMAG0;
                ehdr->e_ident[EI_MAG1] = ELFMAG1;
                ehdr->e_ident[EI_MAG2] = ELFMAG2;
                ehdr->e_ident[EI_MAG3] = ELFMAG3;
                ehdr->e_ident[EI_CLASS] = ELF_CLASS;
                ehdr->e_ident[EI_DATA] = ELF_DATA;
                ehdr->e_ident[EI_VERSION] = EV_CURRENT;
                ehdr->e_ident[EI_OSABI] = ELFOSABI_FREEBSD;
                ehdr->e_ident[EI_ABIVERSION] = 0;
                ehdr->e_ident[EI_PAD] = 0;
                ehdr->e_type = ET_CORE;
#if defined(COMPAT_IA32) && __ELF_WORD_SIZE == 32
                ehdr->e_machine = EM_386;
#else
                ehdr->e_machine = ELF_ARCH;
#endif
                ehdr->e_version = EV_CURRENT;
                ehdr->e_entry = 0;
                ehdr->e_phoff = phoff;
                ehdr->e_flags = 0;
                ehdr->e_ehsize = sizeof(Elf_Ehdr);
                ehdr->e_phentsize = sizeof(Elf_Phdr);
                ehdr->e_phnum = numsegs + 1;
                ehdr->e_shentsize = sizeof(Elf_Shdr);
                ehdr->e_shnum = 0;
                ehdr->e_shstrndx = SHN_UNDEF;

                /*
                 * Fill in the program header entries.
                 */
                phdr = (Elf_Phdr *)((char *)dst + phoff);

                /* The note segment. */
                phdr->p_type = PT_NOTE;
                phdr->p_offset = noteoff;
                phdr->p_vaddr = 0;
                phdr->p_paddr = 0;
                phdr->p_filesz = notesz;
                phdr->p_memsz = 0;
                phdr->p_flags = 0;
                phdr->p_align = 0;
                phdr++;

                /* All the writable segments from the program. */
                phc.phdr = phdr;
                phc.offset = *off;
                each_writable_segment(td, cb_put_phdr, &phc);
        }
}

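/*
 * Emit (or, when dst is NULL, merely size) a single ELF note record.
 * The on-disk layout is the fixed Elf_Note header followed by the
 * name and descriptor, each padded to an Elf_Size boundary:
 *
 *	n_namesz | n_descsz | n_type | name (padded) | desc (padded)
 */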
static void
__elfN(putnote)(void *dst, size_t *off, const char *name, int type,
    const void *desc, size_t descsz)
{
        Elf_Note note;

        note.n_namesz = strlen(name) + 1;
        note.n_descsz = descsz;
        note.n_type = type;
        if (dst != NULL)
                bcopy(&note, (char *)dst + *off, sizeof note);
        *off += sizeof note;
        if (dst != NULL)
                bcopy(name, (char *)dst + *off, note.n_namesz);
        *off += roundup2(note.n_namesz, sizeof(Elf_Size));
        if (dst != NULL)
                bcopy(desc, (char *)dst + *off, note.n_descsz);
        *off += roundup2(note.n_descsz, sizeof(Elf_Size));
}

/*
 * Tell kern_execve.c about it, with a little help from the linker.
 */
static struct execsw __elfN(execsw) = {
        __CONCAT(exec_, __elfN(imgact)),
        __XSTRING(__CONCAT(ELF, __ELF_WORD_SIZE))
};
EXEC_SET(__CONCAT(elf, __ELF_WORD_SIZE), __elfN(execsw));
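
/*
 * EXEC_SET() places a pointer to the execsw entry into a linker set
 * that kern_execve.c walks when probing image activators, so this
 * file registers itself without requiring an explicit call at boot.
 */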
