FreeBSD/Linux Kernel Cross Reference
sys/kern/imgact_elf.c


    1 /*-
    2  * Copyright (c) 2000 David O'Brien
    3  * Copyright (c) 1995-1996 Søren Schmidt
    4  * Copyright (c) 1996 Peter Wemm
    5  * All rights reserved.
    6  *
    7  * Redistribution and use in source and binary forms, with or without
    8  * modification, are permitted provided that the following conditions
    9  * are met:
   10  * 1. Redistributions of source code must retain the above copyright
   11  *    notice, this list of conditions and the following disclaimer
   12  *    in this position and unchanged.
   13  * 2. Redistributions in binary form must reproduce the above copyright
   14  *    notice, this list of conditions and the following disclaimer in the
   15  *    documentation and/or other materials provided with the distribution.
   16  * 3. The name of the author may not be used to endorse or promote products
   17  *    derived from this software without specific prior written permission
   18  *
   19  * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
   20  * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
   21  * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
   22  * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
   23  * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
   24  * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
   25  * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
   26  * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
   27  * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
   28  * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
   29  */
   30 
   31 #include <sys/cdefs.h>
   32 __FBSDID("$FreeBSD$");
   33 
   34 #include <sys/param.h>
   35 #include <sys/exec.h>
   36 #include <sys/fcntl.h>
   37 #include <sys/imgact.h>
   38 #include <sys/imgact_elf.h>
   39 #include <sys/kernel.h>
   40 #include <sys/lock.h>
   41 #include <sys/malloc.h>
   42 #include <sys/mutex.h>
   43 #include <sys/mman.h>
   44 #include <sys/namei.h>
   45 #include <sys/pioctl.h>
   46 #include <sys/proc.h>
   47 #include <sys/procfs.h>
   48 #include <sys/resourcevar.h>
   49 #include <sys/systm.h>
   50 #include <sys/signalvar.h>
   51 #include <sys/stat.h>
   52 #include <sys/sx.h>
   53 #include <sys/syscall.h>
   54 #include <sys/sysctl.h>
   55 #include <sys/sysent.h>
   56 #include <sys/vnode.h>
   57 
   58 #include <vm/vm.h>
   59 #include <vm/vm_kern.h>
   60 #include <vm/vm_param.h>
   61 #include <vm/pmap.h>
   62 #include <vm/vm_map.h>
   63 #include <vm/vm_object.h>
   64 #include <vm/vm_extern.h>
   65 
   66 #include <machine/elf.h>
   67 #include <machine/md_var.h>
   68 
   69 #define OLD_EI_BRAND    8
   70 
   71 static int __elfN(check_header)(const Elf_Ehdr *hdr);
   72 static Elf_Brandinfo *__elfN(get_brandinfo)(const Elf_Ehdr *hdr,
   73     const char *interp);
   74 static int __elfN(load_file)(struct proc *p, const char *file, u_long *addr,
   75     u_long *entry, size_t pagesize);
   76 static int __elfN(load_section)(struct proc *p,
   77     struct vmspace *vmspace, struct vnode *vp, vm_object_t object,
   78     vm_offset_t offset, caddr_t vmaddr, size_t memsz, size_t filsz,
   79     vm_prot_t prot, size_t pagesize);
   80 static int __CONCAT(exec_, __elfN(imgact))(struct image_params *imgp);
   81 
   82 SYSCTL_NODE(_kern, OID_AUTO, __CONCAT(elf, __ELF_WORD_SIZE), CTLFLAG_RW, 0,
   83     "");
   84 
   85 int __elfN(fallback_brand) = -1;
   86 SYSCTL_INT(__CONCAT(_kern_elf, __ELF_WORD_SIZE), OID_AUTO,
   87     fallback_brand, CTLFLAG_RW, &__elfN(fallback_brand), 0,
   88     __XSTRING(__CONCAT(ELF, __ELF_WORD_SIZE)) " brand of last resort");
   89 TUNABLE_INT("kern.elf" __XSTRING(__ELF_WORD_SIZE) ".fallback_brand",
   90     &__elfN(fallback_brand));
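/*
 * Illustrative usage of the fallback knob declared above (a sketch): with
 * __ELF_WORD_SIZE == 32 the tunable name expands to
 * "kern.elf32.fallback_brand", and 3 (ELFOSABI_LINUX) is only an example
 * value one might pick when most unbranded binaries are Linux ones.
 *
 *      # at runtime
 *      sysctl kern.elf32.fallback_brand=3
 *
 *      # or as a boot-time tunable in /boot/loader.conf
 *      kern.elf32.fallback_brand="3"
 */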
   91 
   92 static int elf_trace = 0;
   93 SYSCTL_INT(_debug, OID_AUTO, __elfN(trace), CTLFLAG_RW, &elf_trace, 0, "");
   94 
   95 static int elf_legacy_coredump = 0;
   96 SYSCTL_INT(_debug, OID_AUTO, __elfN(legacy_coredump), CTLFLAG_RW, 
   97     &elf_legacy_coredump, 0, "");
   98 
   99 static Elf_Brandinfo *elf_brand_list[MAX_BRANDS];
  100 
  101 int
  102 __elfN(insert_brand_entry)(Elf_Brandinfo *entry)
  103 {
  104         int i;
  105 
  106         for (i = 0; i < MAX_BRANDS; i++) {
  107                 if (elf_brand_list[i] == NULL) {
  108                         elf_brand_list[i] = entry;
  109                         break;
  110                 }
  111         }
  112         if (i == MAX_BRANDS)
  113                 return (-1);
  114         return (0);
  115 }
  116 
  117 int
  118 __elfN(remove_brand_entry)(Elf_Brandinfo *entry)
  119 {
  120         int i;
  121 
  122         for (i = 0; i < MAX_BRANDS; i++) {
  123                 if (elf_brand_list[i] == entry) {
  124                         elf_brand_list[i] = NULL;
  125                         break;
  126                 }
  127         }
  128         if (i == MAX_BRANDS)
  129                 return (-1);
  130         return (0);
  131 }
  132 
  133 int
  134 __elfN(brand_inuse)(Elf_Brandinfo *entry)
  135 {
  136         struct proc *p;
  137         int rval = FALSE;
  138 
  139         sx_slock(&allproc_lock);
  140         LIST_FOREACH(p, &allproc, p_list) {
  141                 if (p->p_sysent == entry->sysvec) {
  142                         rval = TRUE;
  143                         break;
  144                 }
  145         }
  146         sx_sunlock(&allproc_lock);
  147 
  148         return (rval);
  149 }
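/*
 * Sketch of how an ABI emulation module might drive the three hooks above.
 * The names below are hypothetical, only the Elf_Brandinfo members this
 * file reads are initialized, and <sys/module.h> is assumed for module_t
 * and the MOD_* constants.
 */
extern struct sysentvec example_sysvec;         /* hypothetical sysentvec */

static Elf_Brandinfo example_brand_info = {
        .brand          = ELFOSABI_FREEBSD,
        .machine        = EM_386,
        .compat_3_brand = "FreeBSD",
        .interp_path    = "/libexec/ld-elf.so.1",
        .sysvec         = &example_sysvec,
};

static int
example_modevent(module_t mod, int type, void *data)
{
        switch (type) {
        case MOD_LOAD:
                /* Claim a free slot in elf_brand_list[]. */
                return (__elfN(insert_brand_entry)(&example_brand_info) == 0 ?
                    0 : ENOMEM);
        case MOD_UNLOAD:
                /* Refuse to unload while a process still uses the sysvec. */
                if (__elfN(brand_inuse)(&example_brand_info))
                        return (EBUSY);
                return (__elfN(remove_brand_entry)(&example_brand_info) == 0 ?
                    0 : EINVAL);
        default:
                return (EOPNOTSUPP);
        }
}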
  150 
  151 static Elf_Brandinfo *
  152 __elfN(get_brandinfo)(const Elf_Ehdr *hdr, const char *interp)
  153 {
  154         Elf_Brandinfo *bi;
  155         int i;
  156 
  157         /*
  158          * We support three types of branding -- (1) the ELF EI_OSABI field
  159          * that SCO added to the ELF spec, (2) FreeBSD 3.x's traditional string
  160          * branding w/in the ELF header, and (3) path of the `interp_path'
  161          * field.  We should also look for an ".note.ABI-tag" ELF section now
  162          * in all Linux ELF binaries, FreeBSD 4.1+, and some NetBSD ones.
  163          */
  164 
  165         /* If the executable has a brand, search for it in the brand list. */
  166         for (i = 0; i < MAX_BRANDS; i++) {
  167                 bi = elf_brand_list[i];
  168                 if (bi != NULL && hdr->e_machine == bi->machine &&
  169                     (hdr->e_ident[EI_OSABI] == bi->brand ||
  170                     strncmp((const char *)&hdr->e_ident[OLD_EI_BRAND],
  171                     bi->compat_3_brand, strlen(bi->compat_3_brand)) == 0))
  172                         return (bi);
  173         }
  174 
  175         /* Lacking a known brand, search for a recognized interpreter. */
  176         if (interp != NULL) {
  177                 for (i = 0; i < MAX_BRANDS; i++) {
  178                         bi = elf_brand_list[i];
  179                         if (bi != NULL && hdr->e_machine == bi->machine &&
  180                             strcmp(interp, bi->interp_path) == 0)
  181                                 return (bi);
  182                 }
  183         }
  184 
  185         /* Lacking a recognized interpreter, try the default brand */
  186         for (i = 0; i < MAX_BRANDS; i++) {
  187                 bi = elf_brand_list[i];
  188                 if (bi != NULL && hdr->e_machine == bi->machine &&
  189                     __elfN(fallback_brand) == bi->brand)
  190                         return (bi);
  191         }
  192         return (NULL);
  193 }
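/*
 * Example of the three matching passes above (illustrative): a Linux/i386
 * binary usually carries ELFOSABI_NONE and no FreeBSD 3.x string brand, so
 * the first pass fails; its PT_INTERP of "/lib/ld-linux.so.2" then matches
 * the interp_path of the Linux brand entry in the second pass.  A static
 * binary with no interpreter and no recognizable brand falls through to
 * whatever brand the kern.elf<N>.fallback_brand sysctl selects.
 */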
  194 
  195 static int
  196 __elfN(check_header)(const Elf_Ehdr *hdr)
  197 {
  198         Elf_Brandinfo *bi;
  199         int i;
  200 
  201         if (!IS_ELF(*hdr) ||
  202             hdr->e_ident[EI_CLASS] != ELF_TARG_CLASS ||
  203             hdr->e_ident[EI_DATA] != ELF_TARG_DATA ||
  204             hdr->e_ident[EI_VERSION] != EV_CURRENT ||
  205             hdr->e_phentsize != sizeof(Elf_Phdr) ||
  206             hdr->e_version != ELF_TARG_VER)
  207                 return (ENOEXEC);
  208 
  209         /*
  210          * Make sure we have at least one brand for this machine.
  211          */
  212 
  213         for (i = 0; i < MAX_BRANDS; i++) {
  214                 bi = elf_brand_list[i];
  215                 if (bi != NULL && bi->machine == hdr->e_machine)
  216                         break;
  217         }
  218         if (i == MAX_BRANDS)
  219                 return (ENOEXEC);
  220 
  221         return (0);
  222 }
  223 
  224 static int
  225 __elfN(map_partial)(vm_map_t map, vm_object_t object, vm_ooffset_t offset,
  226         vm_offset_t start, vm_offset_t end, vm_prot_t prot,
  227         vm_prot_t max)
  228 {
  229         int error, rv;
  230         vm_offset_t off;
  231         vm_offset_t data_buf = 0;
  232 
  233         /*
  234          * Create the page if it doesn't exist yet. Ignore errors.
  235          */
  236         vm_map_lock(map);
  237         vm_map_insert(map, NULL, 0, trunc_page(start), round_page(end), max,
  238             max, 0);
  239         vm_map_unlock(map);
  240 
  241         /*
  242          * Find the page from the underlying object.
  243          */
  244         if (object) {
  245                 vm_object_reference(object);
  246                 rv = vm_map_find(exec_map,
  247                                  object,
  248                                  trunc_page(offset),
  249                                  &data_buf,
  250                                  PAGE_SIZE,
  251                                  TRUE,
  252                                  VM_PROT_READ,
  253                                  VM_PROT_ALL,
  254                                  MAP_COPY_ON_WRITE | MAP_PREFAULT_PARTIAL);
  255                 if (rv != KERN_SUCCESS) {
  256                         vm_object_deallocate(object);
  257                         return (rv);
  258                 }
  259 
  260                 off = offset - trunc_page(offset);
  261                 error = copyout((caddr_t)data_buf + off, (caddr_t)start,
  262                     end - start);
  263                 vm_map_remove(exec_map, data_buf, data_buf + PAGE_SIZE);
  264                 if (error) {
  265                         return (KERN_FAILURE);
  266                 }
  267         }
  268 
  269         return (KERN_SUCCESS);
  270 }
  271 
  272 static int
  273 __elfN(map_insert)(vm_map_t map, vm_object_t object, vm_ooffset_t offset,
  274         vm_offset_t start, vm_offset_t end, vm_prot_t prot,
  275         vm_prot_t max, int cow)
  276 {
  277         vm_offset_t data_buf, off;
  278         vm_size_t sz;
  279         int error, rv;
  280 
  281         if (start != trunc_page(start)) {
  282                 rv = __elfN(map_partial)(map, object, offset, start,
  283                     round_page(start), prot, max);
  284                 if (rv)
  285                         return (rv);
  286                 offset += round_page(start) - start;
  287                 start = round_page(start);
  288         }
  289         if (end != round_page(end)) {
  290                 rv = __elfN(map_partial)(map, object, offset +
  291                     trunc_page(end) - start, trunc_page(end), end, prot, max);
  292                 if (rv)
  293                         return (rv);
  294                 end = trunc_page(end);
  295         }
  296         if (end > start) {
  297                 if (offset & PAGE_MASK) {
  298                         /*
  299                          * The mapping is not page aligned. This means we have
  300                          * to copy the data. Sigh.
  301                          */
  302                         rv = vm_map_find(map, 0, 0, &start, end - start,
  303                             FALSE, prot, max, 0);
  304                         if (rv)
  305                                 return (rv);
  306                         data_buf = 0;
  307                         while (start < end) {
  308                                 vm_object_reference(object);
  309                                 rv = vm_map_find(exec_map,
  310                                                  object,
  311                                                  trunc_page(offset),
  312                                                  &data_buf,
  313                                                  2 * PAGE_SIZE,
  314                                                  TRUE,
  315                                                  VM_PROT_READ,
  316                                                  VM_PROT_ALL,
  317                                                  (MAP_COPY_ON_WRITE
  318                                                   | MAP_PREFAULT_PARTIAL));
  319                                 if (rv != KERN_SUCCESS) {
  320                                         vm_object_deallocate(object);
  321                                         return (rv);
  322                                 }
  323                                 off = offset - trunc_page(offset);
  324                                 sz = end - start;
  325                                 if (sz > PAGE_SIZE)
  326                                         sz = PAGE_SIZE;
  327                                 error = copyout((caddr_t)data_buf + off,
  328                                     (caddr_t)start, sz);
  329                                 vm_map_remove(exec_map, data_buf,
  330                                     data_buf + 2 * PAGE_SIZE);
  331                                 if (error) {
  332                                         return (KERN_FAILURE);
  333                                 }
  334                                 start += sz;
  335                         }
  336                         rv = KERN_SUCCESS;
  337                 } else {
  338                         vm_map_lock(map);
  339                         rv = vm_map_insert(map, object, offset, start, end,
  340                             prot, max, cow);
  341                         vm_map_unlock(map);
  342                 }
  343                 return (rv);
  344         } else {
  345                 return (KERN_SUCCESS);
  346         }
  347 }
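/*
 * Worked example for the splitting above (illustrative numbers, PAGE_SIZE
 * taken as 4096): inserting [0x20f80, 0x23100) backed by file offset
 * 0x10f80 -- the address and offset share the same misalignment within the
 * page, as ELF layout normally guarantees -- proceeds as
 *
 *      map_partial():   [0x20f80, 0x21000)  copied via copyout()
 *      vm_map_insert(): [0x21000, 0x23000)  mapped directly, offset 0x11000
 *      map_partial():   [0x23000, 0x23100)  copied via copyout()
 *
 * If the adjusted offset were not page aligned, the middle range would also
 * fall back to the copying loop above, since the object's pages could not
 * be mapped in place.
 */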
  348 
  349 static int
  350 __elfN(load_section)(struct proc *p, struct vmspace *vmspace,
  351         struct vnode *vp, vm_object_t object, vm_offset_t offset,
  352         caddr_t vmaddr, size_t memsz, size_t filsz, vm_prot_t prot,
  353         size_t pagesize)
  354 {
  355         size_t map_len;
  356         vm_offset_t map_addr;
  357         int error, rv, cow;
  358         size_t copy_len;
  359         vm_offset_t file_addr;
  360         vm_offset_t data_buf = 0;
  361 
  362         GIANT_REQUIRED;
  363 
  364         error = 0;
  365 
  366         /*
  367          * It's necessary to fail if the filsz + offset taken from the
  368          * header is greater than the actual file pager object's size.
  369          * If we were to allow this, then the vm_map_find() below would
  370          * walk right off the end of the file object and into the ether.
  371          *
  372          * While I'm here, might as well check for something else that
  373          * is invalid: filsz cannot be greater than memsz.
  374          */
  375         if ((off_t)filsz + offset > object->un_pager.vnp.vnp_size ||
  376             filsz > memsz) {
  377                 uprintf("elf_load_section: truncated ELF file\n");
  378                 return (ENOEXEC);
  379         }
  380 
  381 #define trunc_page_ps(va, ps)   ((va) & ~(ps - 1))
  382 #define round_page_ps(va, ps)   (((va) + (ps - 1)) & ~(ps - 1))
  383 
  384         map_addr = trunc_page_ps((vm_offset_t)vmaddr, pagesize);
  385         file_addr = trunc_page_ps(offset, pagesize);
  386 
  387         /*
  388          * We have two choices.  We can either clear the data in the last page
  389          * of an oversized mapping, or we can start the anon mapping a page
  390          * early and copy the initialized data into that first page.  We
  391          * choose the second.
  392          */
  393         if (memsz > filsz)
  394                 map_len = trunc_page_ps(offset + filsz, pagesize) - file_addr;
  395         else
  396                 map_len = round_page_ps(offset + filsz, pagesize) - file_addr;
  397 
  398         if (map_len != 0) {
  399                 vm_object_reference(object);
  400 
  401                 /* cow flags: don't dump readonly sections in core */
  402                 cow = MAP_COPY_ON_WRITE | MAP_PREFAULT |
  403                     (prot & VM_PROT_WRITE ? 0 : MAP_DISABLE_COREDUMP);
  404 
  405                 rv = __elfN(map_insert)(&vmspace->vm_map,
  406                                       object,
  407                                       file_addr,        /* file offset */
  408                                       map_addr,         /* virtual start */
  409                                       map_addr + map_len,/* virtual end */
  410                                       prot,
  411                                       VM_PROT_ALL,
  412                                       cow);
  413                 if (rv != KERN_SUCCESS) {
  414                         vm_object_deallocate(object);
  415                         return (EINVAL);
  416                 }
  417 
  418                 /* we can stop now if we've covered it all */
  419                 if (memsz == filsz) {
  420                         return (0);
  421                 }
  422         }
  423 
  424 
  425         /*
  426          * We have to get the remaining bit of the file into the first part
  427          * of the oversized map segment.  This is normally because the .data
  428          * segment in the file is extended to provide bss.  It's a neat idea
  429          * to try and save a page, but it's a pain in the behind to implement.
  430          */
  431         copy_len = (offset + filsz) - trunc_page_ps(offset + filsz, pagesize);
  432         map_addr = trunc_page_ps((vm_offset_t)vmaddr + filsz, pagesize);
  433         map_len = round_page_ps((vm_offset_t)vmaddr + memsz, pagesize) -
  434             map_addr;
  435 
  436         /* This had damn well better be true! */
  437         if (map_len != 0) {
  438                 rv = __elfN(map_insert)(&vmspace->vm_map, NULL, 0, map_addr,
  439                     map_addr + map_len, VM_PROT_ALL, VM_PROT_ALL, 0);
  440                 if (rv != KERN_SUCCESS) {
  441                         return (EINVAL);
  442                 }
  443         }
  444 
  445         if (copy_len != 0) {
  446                 vm_offset_t off;
  447                 vm_object_reference(object);
  448                 rv = vm_map_find(exec_map,
  449                                  object,
  450                                  trunc_page(offset + filsz),
  451                                  &data_buf,
  452                                  PAGE_SIZE,
  453                                  TRUE,
  454                                  VM_PROT_READ,
  455                                  VM_PROT_ALL,
  456                                  MAP_COPY_ON_WRITE | MAP_PREFAULT_PARTIAL);
  457                 if (rv != KERN_SUCCESS) {
  458                         vm_object_deallocate(object);
  459                         return (EINVAL);
  460                 }
  461 
  462                 /* send the page fragment to user space */
  463                 off = trunc_page_ps(offset + filsz, pagesize) -
  464                     trunc_page(offset + filsz);
  465                 error = copyout((caddr_t)data_buf + off, (caddr_t)map_addr,
  466                     copy_len);
  467                 vm_map_remove(exec_map, data_buf, data_buf + PAGE_SIZE);
  468                 if (error) {
  469                         return (error);
  470                 }
  471         }
  472 
  473         /*
  474          * set it to the specified protection.
  475          * XXX had better undo the damage from pasting over the cracks here!
  476          */
  477         vm_map_protect(&vmspace->vm_map, trunc_page(map_addr),
  478             round_page(map_addr + map_len),  prot, FALSE);
  479 
  480         return (error);
  481 }
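/*
 * Worked example for the bss handling above (illustrative numbers, with a
 * 4096-byte pagesize): a PT_LOAD segment with offset 0x1000, vmaddr
 * 0x8049000, filsz 0x1234 and memsz 0x3000 gives
 *
 *      file_addr = 0x1000
 *      map_len   = trunc_page_ps(0x1000 + 0x1234) - 0x1000 = 0x1000
 *
 * so only the fully file-backed page is mapped from the vnode object; then
 *
 *      copy_len  = 0x2234 - 0x2000                         = 0x234
 *      map_addr  = trunc_page_ps(0x8049000 + 0x1234)       = 0x804a000
 *      map_len   = round_page_ps(0x8049000 + 0x3000) - map_addr = 0x2000
 *
 * i.e. two anonymous zero-filled pages are inserted at 0x804a000 and the
 * trailing 0x234 initialized bytes are copied into the first of them,
 * which is exactly the "start the anon mapping a page early" strategy
 * described in the comment inside the function.
 */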
  482 
  483 /*
  484  * Load the file "file" into memory.  It may be either a shared object
  485  * or an executable.
  486  *
  487  * The "addr" reference parameter is in/out.  On entry, it specifies
  488  * the address where a shared object should be loaded.  If the file is
  489  * an executable, this value is ignored.  On exit, "addr" specifies
  490  * where the file was actually loaded.
  491  *
  492  * The "entry" reference parameter is out only.  On exit, it specifies
  493  * the entry point for the loaded file.
  494  */
  495 static int
  496 __elfN(load_file)(struct proc *p, const char *file, u_long *addr,
  497         u_long *entry, size_t pagesize)
  498 {
  499         struct {
  500                 struct nameidata nd;
  501                 struct vattr attr;
  502                 struct image_params image_params;
  503         } *tempdata;
  504         const Elf_Ehdr *hdr = NULL;
  505         const Elf_Phdr *phdr = NULL;
  506         struct nameidata *nd;
  507         struct vmspace *vmspace = p->p_vmspace;
  508         struct vattr *attr;
  509         struct image_params *imgp;
  510         vm_prot_t prot;
  511         u_long rbase;
  512         u_long base_addr = 0;
  513         int error, i, numsegs;
  514 
  515         if (curthread->td_proc != p)
  516                 panic("elf_load_file - thread");        /* XXXKSE DIAGNOSTIC */
  517 
  518         tempdata = malloc(sizeof(*tempdata), M_TEMP, M_WAITOK);
  519         nd = &tempdata->nd;
  520         attr = &tempdata->attr;
  521         imgp = &tempdata->image_params;
  522 
  523         /*
  524          * Initialize part of the common data
  525          */
  526         imgp->proc = p;
  527         imgp->userspace_argv = NULL;
  528         imgp->userspace_envv = NULL;
  529         imgp->attr = attr;
  530         imgp->firstpage = NULL;
  531         imgp->image_header = NULL;
  532         imgp->object = NULL;
  533         imgp->execlabel = NULL;
  534 
  535         /* XXXKSE */
  536         NDINIT(nd, LOOKUP, LOCKLEAF|FOLLOW, UIO_SYSSPACE, file, curthread);
  537 
  538         if ((error = namei(nd)) != 0) {
  539                 nd->ni_vp = NULL;
  540                 goto fail;
  541         }
  542         NDFREE(nd, NDF_ONLY_PNBUF);
  543         imgp->vp = nd->ni_vp;
  544 
  545         /*
  546          * Check permissions, modes, uid, etc on the file, and "open" it.
  547          */
  548         error = exec_check_permissions(imgp);
  549         if (error) {
  550                 VOP_UNLOCK(nd->ni_vp, 0, curthread); /* XXXKSE */
  551                 goto fail;
  552         }
  553 
  554         error = exec_map_first_page(imgp);
  555         /*
  556          * Also make certain that the interpreter stays the same, so set
  557          * its VV_TEXT flag, too.
  558          */
  559         if (error == 0)
  560                 nd->ni_vp->v_vflag |= VV_TEXT;
  561 
  562         VOP_GETVOBJECT(nd->ni_vp, &imgp->object);
  563         vm_object_reference(imgp->object);
  564 
  565         VOP_UNLOCK(nd->ni_vp, 0, curthread); /* XXXKSE */
  566         if (error)
  567                 goto fail;
  568 
  569         hdr = (const Elf_Ehdr *)imgp->image_header;
  570         if ((error = __elfN(check_header)(hdr)) != 0)
  571                 goto fail;
  572         if (hdr->e_type == ET_DYN)
  573                 rbase = *addr;
  574         else if (hdr->e_type == ET_EXEC)
  575                 rbase = 0;
  576         else {
  577                 error = ENOEXEC;
  578                 goto fail;
  579         }
  580 
  581         /* Only support headers that fit within first page for now      */
  582         /*    (multiplication of two Elf_Half fields will not overflow) */
  583         if ((hdr->e_phoff > PAGE_SIZE) ||
  584             (hdr->e_phentsize * hdr->e_phnum) > PAGE_SIZE - hdr->e_phoff) {
  585                 error = ENOEXEC;
  586                 goto fail;
  587         }
  588 
  589         phdr = (const Elf_Phdr *)(imgp->image_header + hdr->e_phoff);
  590 
  591         for (i = 0, numsegs = 0; i < hdr->e_phnum; i++) {
  592                 if (phdr[i].p_type == PT_LOAD) {        /* Loadable segment */
  593                         prot = 0;
  594                         if (phdr[i].p_flags & PF_X)
  595                                 prot |= VM_PROT_EXECUTE;
  596                         if (phdr[i].p_flags & PF_W)
  597                                 prot |= VM_PROT_WRITE;
  598                         if (phdr[i].p_flags & PF_R)
  599                                 prot |= VM_PROT_READ;
  600 
  601                         if ((error = __elfN(load_section)(p, vmspace,
  602                             nd->ni_vp, imgp->object, phdr[i].p_offset,
  603                             (caddr_t)(uintptr_t)phdr[i].p_vaddr + rbase,
  604                             phdr[i].p_memsz, phdr[i].p_filesz, prot,
  605                             pagesize)) != 0)
  606                                 goto fail;
  607                         /*
  608                          * Establish the base address if this is the
  609                          * first segment.
  610                          */
  611                         if (numsegs == 0)
  612                                 base_addr = trunc_page(phdr[i].p_vaddr +
  613                                     rbase);
  614                         numsegs++;
  615                 }
  616         }
  617         *addr = base_addr;
  618         *entry = (unsigned long)hdr->e_entry + rbase;
  619 
  620 fail:
  621         if (imgp->firstpage)
  622                 exec_unmap_first_page(imgp);
  623         if (imgp->object)
  624                 vm_object_deallocate(imgp->object);
  625 
  626         if (nd->ni_vp)
  627                 vrele(nd->ni_vp);
  628 
  629         free(tempdata, M_TEMP);
  630 
  631         return (error);
  632 }
  633 
  634 static int
  635 __CONCAT(exec_, __elfN(imgact))(struct image_params *imgp)
  636 {
  637         const Elf_Ehdr *hdr = (const Elf_Ehdr *)imgp->image_header;
  638         const Elf_Phdr *phdr;
  639         Elf_Auxargs *elf_auxargs = NULL;
  640         struct vmspace *vmspace;
  641         vm_prot_t prot;
  642         u_long text_size = 0, data_size = 0, total_size = 0;
  643         u_long text_addr = 0, data_addr = 0;
  644         u_long seg_size, seg_addr;
  645         u_long addr, entry = 0, proghdr = 0;
  646         int error, i;
  647         const char *interp = NULL;
  648         Elf_Brandinfo *brand_info;
  649         char *path;
  650         struct thread *td = curthread;
  651         struct sysentvec *sv;
  652 
  653         GIANT_REQUIRED;
  654 
  655         /*
  656          * Do we have a valid ELF header ?
  657          *
  658          * Only allow ET_EXEC & ET_DYN here, reject ET_DYN later
  659          * if particular brand doesn't support it.
  660          */
  661         if (__elfN(check_header)(hdr) != 0 ||
  662             (hdr->e_type != ET_EXEC && hdr->e_type != ET_DYN))
  663                 return (-1);
  664 
  665         /*
  666          * From here on down, we return an errno, not -1, as we've
  667          * detected an ELF file.
  668          */
  669 
  670         if ((hdr->e_phoff > PAGE_SIZE) ||
  671             (hdr->e_phoff + hdr->e_phentsize * hdr->e_phnum) > PAGE_SIZE) {
  672                 /* Only support headers in first page for now */
  673                 return (ENOEXEC);
  674         }
  675         phdr = (const Elf_Phdr *)(imgp->image_header + hdr->e_phoff);
  676 
  677         /*
  678          * From this point on, we may have resources that need to be freed.
  679          */
  680 
  681         VOP_UNLOCK(imgp->vp, 0, td);
  682 
  683         for (i = 0; i < hdr->e_phnum; i++) {
  684                 switch (phdr[i].p_type) {
  685                 case PT_INTERP: /* Path to interpreter */
  686                         if (phdr[i].p_filesz > MAXPATHLEN ||
  687                             phdr[i].p_offset + phdr[i].p_filesz > PAGE_SIZE) {
  688                                 error = ENOEXEC;
  689                                 goto fail;
  690                         }
  691                         interp = imgp->image_header + phdr[i].p_offset;
  692                         break;
  693                 default:
  694                         break;
  695                 }
  696         }
  697 
  698         brand_info = __elfN(get_brandinfo)(hdr, interp);
  699         if (brand_info == NULL) {
  700                 uprintf("ELF binary type \"%u\" not known.\n",
  701                     hdr->e_ident[EI_OSABI]);
  702                 error = ENOEXEC;
  703                 goto fail;
  704         }
  705         if (hdr->e_type == ET_DYN && brand_info->brand != ELFOSABI_LINUX) {
  706                 error = ENOEXEC;
  707                 goto fail;
  708         }
  709         sv = brand_info->sysvec;
  710         if (interp != NULL && brand_info->interp_newpath != NULL)
  711                 interp = brand_info->interp_newpath;
  712 
  713         if ((error = exec_extract_strings(imgp)) != 0)
  714                 goto fail;
  715 
  716         exec_new_vmspace(imgp, sv);
  717 
  718         vmspace = imgp->proc->p_vmspace;
  719 
  720         for (i = 0; i < hdr->e_phnum; i++) {
  721                 switch (phdr[i].p_type) {
  722                 case PT_LOAD:   /* Loadable segment */
  723                         prot = 0;
  724                         if (phdr[i].p_flags & PF_X)
  725                                 prot |= VM_PROT_EXECUTE;
  726                         if (phdr[i].p_flags & PF_W)
  727                                 prot |= VM_PROT_WRITE;
  728                         if (phdr[i].p_flags & PF_R)
  729                                 prot |= VM_PROT_READ;
  730 
  731 #if defined(__ia64__) && __ELF_WORD_SIZE == 32 && defined(IA32_ME_HARDER)
  732                         /*
  733                          * Some x86 binaries assume read == executable,
  734                          * notably the M3 runtime and therefore cvsup
  735                          */
  736                         if (prot & VM_PROT_READ)
  737                                 prot |= VM_PROT_EXECUTE;
  738 #endif
  739 
  740                         if ((error = __elfN(load_section)(imgp->proc, vmspace,
  741                             imgp->vp, imgp->object, phdr[i].p_offset,
  742                             (caddr_t)(uintptr_t)phdr[i].p_vaddr,
  743                             phdr[i].p_memsz, phdr[i].p_filesz, prot,
  744                             sv->sv_pagesize)) != 0)
  745                                 goto fail;
  746 
  747                         /*
  748                          * If this segment contains the program headers,
  749                          * remember their virtual address for the AT_PHDR
  750                          * aux entry. Static binaries don't usually include
  751                          * a PT_PHDR entry.
  752                          */
  753                         if (phdr[i].p_offset == 0 &&
  754                             hdr->e_phoff + hdr->e_phnum * hdr->e_phentsize
  755                                 <= phdr[i].p_filesz)
  756                                 proghdr = phdr[i].p_vaddr + hdr->e_phoff;
  757 
  758                         seg_addr = trunc_page(phdr[i].p_vaddr);
  759                         seg_size = round_page(phdr[i].p_memsz +
  760                             phdr[i].p_vaddr - seg_addr);
  761 
  762                         /*
  763                          * Is this .text or .data?  We can't use
  764                          * VM_PROT_WRITE or VM_PROT_EXEC, it breaks the
  765                          * alpha terribly and possibly does other bad
  766                          * things so we stick to the old way of figuring
  767                          * it out:  If the segment contains the program
  768                          * entry point, it's a text segment, otherwise it
  769                          * is a data segment.
  770                          *
  771                          * Note that obreak() assumes that data_addr + 
  772                          * data_size == end of data load area, and the ELF
  773                          * file format expects segments to be sorted by
  774                          * address.  If multiple data segments exist, the
  775                          * last one will be used.
  776                          */
  777                         if (hdr->e_entry >= phdr[i].p_vaddr &&
  778                             hdr->e_entry < (phdr[i].p_vaddr +
  779                             phdr[i].p_memsz)) {
  780                                 text_size = seg_size;
  781                                 text_addr = seg_addr;
  782                                 entry = (u_long)hdr->e_entry;
  783                         } else {
  784                                 data_size = seg_size;
  785                                 data_addr = seg_addr;
  786                         }
  787                         total_size += seg_size;
  788                         break;
  789                 case PT_PHDR:   /* Program header table info */
  790                         proghdr = phdr[i].p_vaddr;
  791                         break;
  792                 default:
  793                         break;
  794                 }
  795         }
  796         
  797         if (data_addr == 0 && data_size == 0) {
  798                 data_addr = text_addr;
  799                 data_size = text_size;
  800         }
  801 
  802         /*
  803          * Check limits.  It should be safe to check the
  804          * limits after loading the segments since we do
  805          * not actually fault in all the segments pages.
  806          */
  807         PROC_LOCK(imgp->proc);
  808         if (data_size > lim_cur(imgp->proc, RLIMIT_DATA) ||
  809             text_size > maxtsiz ||
  810             total_size > lim_cur(imgp->proc, RLIMIT_VMEM)) {
  811                 PROC_UNLOCK(imgp->proc);
  812                 error = ENOMEM;
  813                 goto fail;
  814         }
  815 
  816         vmspace->vm_tsize = text_size >> PAGE_SHIFT;
  817         vmspace->vm_taddr = (caddr_t)(uintptr_t)text_addr;
  818         vmspace->vm_dsize = data_size >> PAGE_SHIFT;
  819         vmspace->vm_daddr = (caddr_t)(uintptr_t)data_addr;
  820 
  821         /*
  822          * We load the dynamic linker where a userland call
  823          * to mmap(0, ...) would put it.  The rationale behind this
  824          * calculation is that it leaves room for the heap to grow to
  825          * its maximum allowed size.
  826          */
  827         addr = round_page((vm_offset_t)imgp->proc->p_vmspace->vm_daddr +
  828             lim_max(imgp->proc, RLIMIT_DATA));
  829         PROC_UNLOCK(imgp->proc);
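        /*
         * Illustrative arithmetic for the placement above: with vm_daddr at
         * 0x8050000 and a RLIMIT_DATA hard limit of 512MB, addr becomes
         * round_page(0x8050000 + 0x20000000) = 0x28050000.  Both numbers
         * are examples only; the real values depend on the binary and on
         * the resource limits in force.
         */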
  830 
  831         imgp->entry_addr = entry;
  832 
  833         imgp->proc->p_sysent = sv;
  834         if (interp != NULL && brand_info->emul_path != NULL &&
  835             brand_info->emul_path[0] != '\0') {
  836                 path = malloc(MAXPATHLEN, M_TEMP, M_WAITOK);
  837                 snprintf(path, MAXPATHLEN, "%s%s", brand_info->emul_path,
  838                     interp);
  839                 error = __elfN(load_file)(imgp->proc, path, &addr,
  840                     &imgp->entry_addr, sv->sv_pagesize);
  841                 free(path, M_TEMP);
  842                 if (error == 0)
  843                         interp = NULL;
  844         }
  845         if (interp != NULL) {
  846                 error = __elfN(load_file)(imgp->proc, interp, &addr,
  847                     &imgp->entry_addr, sv->sv_pagesize);
  848                 if (error != 0) {
  849                         uprintf("ELF interpreter %s not found\n", interp);
  850                         goto fail;
  851                 }
  852         }
  853 
  854         /*
  855          * Construct auxargs table (used by the fixup routine)
  856          */
  857         elf_auxargs = malloc(sizeof(Elf_Auxargs), M_TEMP, M_WAITOK);
  858         elf_auxargs->execfd = -1;
  859         elf_auxargs->phdr = proghdr;
  860         elf_auxargs->phent = hdr->e_phentsize;
  861         elf_auxargs->phnum = hdr->e_phnum;
  862         elf_auxargs->pagesz = PAGE_SIZE;
  863         elf_auxargs->base = addr;
  864         elf_auxargs->flags = 0;
  865         elf_auxargs->entry = entry;
  866         elf_auxargs->trace = elf_trace;
  867 
  868         imgp->auxargs = elf_auxargs;
  869         imgp->interpreted = 0;
  870 
  871 fail:
  872         vn_lock(imgp->vp, LK_EXCLUSIVE | LK_RETRY, td);
  873         return (error);
  874 }
  875 
  876 #define suword __CONCAT(suword, __ELF_WORD_SIZE)
  877 
  878 int
  879 __elfN(freebsd_fixup)(register_t **stack_base, struct image_params *imgp)
  880 {
  881         Elf_Auxargs *args = (Elf_Auxargs *)imgp->auxargs;
  882         Elf_Addr *base;
  883         Elf_Addr *pos;
  884 
  885         base = (Elf_Addr *)*stack_base;
  886         pos = base + (imgp->argc + imgp->envc + 2);
  887 
  888         if (args->trace) {
  889                 AUXARGS_ENTRY(pos, AT_DEBUG, 1);
  890         }
  891         if (args->execfd != -1) {
  892                 AUXARGS_ENTRY(pos, AT_EXECFD, args->execfd);
  893         }
  894         AUXARGS_ENTRY(pos, AT_PHDR, args->phdr);
  895         AUXARGS_ENTRY(pos, AT_PHENT, args->phent);
  896         AUXARGS_ENTRY(pos, AT_PHNUM, args->phnum);
  897         AUXARGS_ENTRY(pos, AT_PAGESZ, args->pagesz);
  898         AUXARGS_ENTRY(pos, AT_FLAGS, args->flags);
  899         AUXARGS_ENTRY(pos, AT_ENTRY, args->entry);
  900         AUXARGS_ENTRY(pos, AT_BASE, args->base);
  901         AUXARGS_ENTRY(pos, AT_NULL, 0);
  902 
  903         free(imgp->auxargs, M_TEMP);
  904         imgp->auxargs = NULL;
  905 
  906         base--;
  907         suword(base, (long)imgp->argc);
  908         *stack_base = (register_t *)base;
  909         return (0);
  910 }
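/*
 * Sketch of the new process stack after the fixup above (simplified; the
 * argument and environment vectors themselves are laid out earlier by the
 * generic exec code):
 *
 *      *stack_base -> argc
 *                     argv[0] ... argv[argc-1], NULL
 *                     envv[0] ... envv[envc-1], NULL
 *                     AT_PHDR, AT_PHENT, AT_PHNUM, AT_PAGESZ, AT_FLAGS,
 *                     AT_ENTRY, AT_BASE, AT_NULL     (the aux vector;
 *                     AT_DEBUG and AT_EXECFD are emitted first when enabled)
 *
 * The aux entries begin at base + argc + envc + 2, i.e. just past the two
 * NULL terminators of the argument and environment vectors.
 */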
  911 
  912 /*
  913  * Code for generating ELF core dumps.
  914  */
  915 
  916 typedef void (*segment_callback)(vm_map_entry_t, void *);
  917 
  918 /* Closure for cb_put_phdr(). */
  919 struct phdr_closure {
  920         Elf_Phdr *phdr;         /* Program header to fill in */
  921         Elf_Off offset;         /* Offset of segment in core file */
  922 };
  923 
  924 /* Closure for cb_size_segment(). */
  925 struct sseg_closure {
  926         int count;              /* Count of writable segments. */
  927         size_t size;            /* Total size of all writable segments. */
  928 };
  929 
  930 static void cb_put_phdr(vm_map_entry_t, void *);
  931 static void cb_size_segment(vm_map_entry_t, void *);
  932 static void each_writable_segment(struct thread *, segment_callback, void *);
  933 static int __elfN(corehdr)(struct thread *, struct vnode *, struct ucred *,
  934     int, void *, size_t);
  935 static void __elfN(puthdr)(struct thread *, void *, size_t *, int);
  936 static void __elfN(putnote)(void *, size_t *, const char *, int,
  937     const void *, size_t);
  938 
  939 extern int osreldate;
  940 
  941 int
  942 __elfN(coredump)(td, vp, limit)
  943         struct thread *td;
  944         struct vnode *vp;
  945         off_t limit;
  946 {
  947         struct ucred *cred = td->td_ucred;
  948         int error = 0;
  949         struct sseg_closure seginfo;
  950         void *hdr;
  951         size_t hdrsize;
  952 
  953         /* Size the program segments. */
  954         seginfo.count = 0;
  955         seginfo.size = 0;
  956         each_writable_segment(td, cb_size_segment, &seginfo);
  957 
  958         /*
  959          * Calculate the size of the core file header area by making
  960          * a dry run of generating it.  Nothing is written, but the
  961          * size is calculated.
  962          */
  963         hdrsize = 0;
  964         __elfN(puthdr)(td, (void *)NULL, &hdrsize, seginfo.count);
  965 
  966         if (hdrsize + seginfo.size >= limit)
  967                 return (EFAULT);
  968 
  969         /*
  970          * Allocate memory for building the header, fill it up,
  971          * and write it out.
  972          */
  973         hdr = malloc(hdrsize, M_TEMP, M_WAITOK);
  974         if (hdr == NULL) {
  975                 return (EINVAL);
  976         }
  977         error = __elfN(corehdr)(td, vp, cred, seginfo.count, hdr, hdrsize);
  978 
  979         /* Write the contents of all of the writable segments. */
  980         if (error == 0) {
  981                 Elf_Phdr *php;
  982                 off_t offset;
  983                 int i;
  984 
  985                 php = (Elf_Phdr *)((char *)hdr + sizeof(Elf_Ehdr)) + 1;
  986                 offset = hdrsize;
  987                 for (i = 0; i < seginfo.count; i++) {
  988                         error = vn_rdwr_inchunks(UIO_WRITE, vp,
  989                             (caddr_t)(uintptr_t)php->p_vaddr,
  990                             php->p_filesz, offset, UIO_USERSPACE,
  991                             IO_UNIT | IO_DIRECT, cred, NOCRED, NULL,
  992                             curthread); /* XXXKSE */
  993                         if (error != 0)
  994                                 break;
  995                         offset += php->p_filesz;
  996                         php++;
  997                 }
  998         }
  999         free(hdr, M_TEMP);
 1000 
 1001         return (error);
 1002 }
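/*
 * Sketch of the core file the routines above and below produce:
 *
 *      Elf_Ehdr                        e_type = ET_CORE
 *      Elf_Phdr[0]     PT_NOTE         points at the note block
 *      Elf_Phdr[1..n]  PT_LOAD         one per writable segment
 *      notes           NT_PRPSINFO, then NT_PRSTATUS and NT_FPREGSET per
 *                      thread (plus any MD notes from dump_thread)
 *      padding         up to a page boundary
 *      segments        the memory contents written by the
 *                      vn_rdwr_inchunks() loop in __elfN(coredump)
 */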
 1003 
 1004 /*
 1005  * A callback for each_writable_segment() to write out the segment's
 1006  * program header entry.
 1007  */
 1008 static void
 1009 cb_put_phdr(entry, closure)
 1010         vm_map_entry_t entry;
 1011         void *closure;
 1012 {
 1013         struct phdr_closure *phc = (struct phdr_closure *)closure;
 1014         Elf_Phdr *phdr = phc->phdr;
 1015 
 1016         phc->offset = round_page(phc->offset);
 1017 
 1018         phdr->p_type = PT_LOAD;
 1019         phdr->p_offset = phc->offset;
 1020         phdr->p_vaddr = entry->start;
 1021         phdr->p_paddr = 0;
 1022         phdr->p_filesz = phdr->p_memsz = entry->end - entry->start;
 1023         phdr->p_align = PAGE_SIZE;
 1024         phdr->p_flags = 0;
 1025         if (entry->protection & VM_PROT_READ)
 1026                 phdr->p_flags |= PF_R;
 1027         if (entry->protection & VM_PROT_WRITE)
 1028                 phdr->p_flags |= PF_W;
 1029         if (entry->protection & VM_PROT_EXECUTE)
 1030                 phdr->p_flags |= PF_X;
 1031 
 1032         phc->offset += phdr->p_filesz;
 1033         phc->phdr++;
 1034 }
 1035 
 1036 /*
 1037  * A callback for each_writable_segment() to gather information about
 1038  * the number of segments and their total size.
 1039  */
 1040 static void
 1041 cb_size_segment(entry, closure)
 1042         vm_map_entry_t entry;
 1043         void *closure;
 1044 {
 1045         struct sseg_closure *ssc = (struct sseg_closure *)closure;
 1046 
 1047         ssc->count++;
 1048         ssc->size += entry->end - entry->start;
 1049 }
 1050 
 1051 /*
 1052  * For each writable segment in the process's memory map, call the given
 1053  * function with a pointer to the map entry and some arbitrary
 1054  * caller-supplied data.
 1055  */
 1056 static void
 1057 each_writable_segment(td, func, closure)
 1058         struct thread *td;
 1059         segment_callback func;
 1060         void *closure;
 1061 {
 1062         struct proc *p = td->td_proc;
 1063         vm_map_t map = &p->p_vmspace->vm_map;
 1064         vm_map_entry_t entry;
 1065 
 1066         for (entry = map->header.next; entry != &map->header;
 1067             entry = entry->next) {
 1068                 vm_object_t obj;
 1069 
 1070                 /*
 1071                  * Don't dump inaccessible mappings, deal with legacy
 1072                  * coredump mode.
 1073                  *
 1074                  * Note that read-only segments related to the elf binary
 1075                  * are marked MAP_ENTRY_NOCOREDUMP now so we no longer
 1076                  * need to arbitrarily ignore such segments.
 1077                  */
 1078                 if (elf_legacy_coredump) {
 1079                         if ((entry->protection & VM_PROT_RW) != VM_PROT_RW)
 1080                                 continue;
 1081                 } else {
 1082                         if ((entry->protection & VM_PROT_ALL) == 0)
 1083                                 continue;
 1084                 }
 1085 
 1086                 /*
 1087                  * Don't include a memory segment in the coredump if
 1088                  * MAP_NOCORE is set in mmap(2) or MADV_NOCORE in
 1089                  * madvise(2).  Do not dump submaps (i.e. parts of the
 1090                  * kernel map).
 1091                  */
 1092                 if (entry->eflags & (MAP_ENTRY_NOCOREDUMP|MAP_ENTRY_IS_SUB_MAP))
 1093                         continue;
 1094 
 1095                 if ((obj = entry->object.vm_object) == NULL)
 1096                         continue;
 1097 
 1098                 /* Find the deepest backing object. */
 1099                 while (obj->backing_object != NULL)
 1100                         obj = obj->backing_object;
 1101 
 1102                 /* Ignore memory-mapped devices and such things. */
 1103                 if (obj->type != OBJT_DEFAULT &&
 1104                     obj->type != OBJT_SWAP &&
 1105                     obj->type != OBJT_VNODE)
 1106                         continue;
 1107 
 1108                 (*func)(entry, closure);
 1109         }
 1110 }
 1111 
 1112 /*
 1113  * Write the core file header to the file, including padding up to
 1114  * the page boundary.
 1115  */
 1116 static int
 1117 __elfN(corehdr)(td, vp, cred, numsegs, hdr, hdrsize)
 1118         struct thread *td;
 1119         struct vnode *vp;
 1120         struct ucred *cred;
 1121         int numsegs;
 1122         size_t hdrsize;
 1123         void *hdr;
 1124 {
 1125         size_t off;
 1126 
 1127         /* Fill in the header. */
 1128         bzero(hdr, hdrsize);
 1129         off = 0;
 1130         __elfN(puthdr)(td, hdr, &off, numsegs);
 1131 
 1132         /* Write it to the core file. */
 1133         return (vn_rdwr_inchunks(UIO_WRITE, vp, hdr, hdrsize, (off_t)0,
 1134             UIO_SYSSPACE, IO_UNIT | IO_DIRECT, cred, NOCRED, NULL,
 1135             td)); /* XXXKSE */
 1136 }
 1137 
 1138 static void
 1139 __elfN(puthdr)(struct thread *td, void *dst, size_t *off, int numsegs)
 1140 {
 1141         struct {
 1142                 prstatus_t status;
 1143                 prfpregset_t fpregset;
 1144                 prpsinfo_t psinfo;
 1145         } *tempdata;
 1146         prstatus_t *status;
 1147         prfpregset_t *fpregset;
 1148         prpsinfo_t *psinfo;
 1149         struct proc *p;
 1150         struct thread *thr;
 1151         size_t ehoff, noteoff, notesz, phoff;
 1152 
 1153         p = td->td_proc;
 1154 
 1155         ehoff = *off;
 1156         *off += sizeof(Elf_Ehdr);
 1157 
 1158         phoff = *off;
 1159         *off += (numsegs + 1) * sizeof(Elf_Phdr);
 1160 
 1161         noteoff = *off;
 1162         /*
 1163          * Don't allocate space for the notes if we're just calculating
 1164          * the size of the header. We also don't collect the data.
 1165          */
 1166         if (dst != NULL) {
 1167                 tempdata = malloc(sizeof(*tempdata), M_TEMP, M_ZERO|M_WAITOK);
 1168                 status = &tempdata->status;
 1169                 fpregset = &tempdata->fpregset;
 1170                 psinfo = &tempdata->psinfo;
 1171         } else {
 1172                 tempdata = NULL;
 1173                 status = NULL;
 1174                 fpregset = NULL;
 1175                 psinfo = NULL;
 1176         }
 1177 
 1178         if (dst != NULL) {
 1179                 psinfo->pr_version = PRPSINFO_VERSION;
 1180                 psinfo->pr_psinfosz = sizeof(prpsinfo_t);
 1181                 strlcpy(psinfo->pr_fname, p->p_comm, sizeof(psinfo->pr_fname));
 1182                 /*
 1183                  * XXX - We don't fill in the command line arguments properly
 1184                  * yet.
 1185                  */
 1186                 strlcpy(psinfo->pr_psargs, p->p_comm,
 1187                     sizeof(psinfo->pr_psargs));
 1188         }
 1189         __elfN(putnote)(dst, off, "FreeBSD", NT_PRPSINFO, psinfo,
 1190             sizeof *psinfo);
 1191 
 1192         /*
 1193          * To have the debugger select the right thread (LWP) as the initial
 1194          * thread, we dump the state of the thread passed to us in td first.
 1195          * This is the thread that causes the core dump and thus likely to
 1196          * be the right thread one wants to have selected in the debugger.
 1197          */
 1198         thr = td;
 1199         while (thr != NULL) {
 1200                 if (dst != NULL) {
 1201                         status->pr_version = PRSTATUS_VERSION;
 1202                         status->pr_statussz = sizeof(prstatus_t);
 1203                         status->pr_gregsetsz = sizeof(gregset_t);
 1204                         status->pr_fpregsetsz = sizeof(fpregset_t);
 1205                         status->pr_osreldate = osreldate;
 1206                         status->pr_cursig = p->p_sig;
 1207                         status->pr_pid = thr->td_tid;
 1208                         fill_regs(thr, &status->pr_reg);
 1209                         fill_fpregs(thr, fpregset);
 1210                 }
 1211                 __elfN(putnote)(dst, off, "FreeBSD", NT_PRSTATUS, status,
 1212                     sizeof *status);
 1213                 __elfN(putnote)(dst, off, "FreeBSD", NT_FPREGSET, fpregset,
 1214                     sizeof *fpregset);
 1215                 /*
 1216                  * Allow for MD specific notes, as well as any MD
 1217                  * specific preparations for writing MI notes.
 1218                  */
 1219                 __elfN(dump_thread)(thr, dst, off);
 1220 
 1221                 thr = (thr == td) ? TAILQ_FIRST(&p->p_threads) :
 1222                     TAILQ_NEXT(thr, td_plist);
 1223                 if (thr == td)
 1224                         thr = TAILQ_NEXT(thr, td_plist);
 1225         }
 1226 
 1227         notesz = *off - noteoff;
 1228 
 1229         if (dst != NULL)
 1230                 free(tempdata, M_TEMP);
 1231 
 1232         /* Align up to a page boundary for the program segments. */
 1233         *off = round_page(*off);
 1234 
 1235         if (dst != NULL) {
 1236                 Elf_Ehdr *ehdr;
 1237                 Elf_Phdr *phdr;
 1238                 struct phdr_closure phc;
 1239 
 1240                 /*
 1241                  * Fill in the ELF header.
 1242                  */
 1243                 ehdr = (Elf_Ehdr *)((char *)dst + ehoff);
 1244                 ehdr->e_ident[EI_MAG0] = ELFMAG0;
 1245                 ehdr->e_ident[EI_MAG1] = ELFMAG1;
 1246                 ehdr->e_ident[EI_MAG2] = ELFMAG2;
 1247                 ehdr->e_ident[EI_MAG3] = ELFMAG3;
 1248                 ehdr->e_ident[EI_CLASS] = ELF_CLASS;
 1249                 ehdr->e_ident[EI_DATA] = ELF_DATA;
 1250                 ehdr->e_ident[EI_VERSION] = EV_CURRENT;
 1251                 ehdr->e_ident[EI_OSABI] = ELFOSABI_FREEBSD;
 1252                 ehdr->e_ident[EI_ABIVERSION] = 0;
 1253                 ehdr->e_ident[EI_PAD] = 0;
 1254                 ehdr->e_type = ET_CORE;
 1255                 ehdr->e_machine = ELF_ARCH;
 1256                 ehdr->e_version = EV_CURRENT;
 1257                 ehdr->e_entry = 0;
 1258                 ehdr->e_phoff = phoff;
 1259                 ehdr->e_flags = 0;
 1260                 ehdr->e_ehsize = sizeof(Elf_Ehdr);
 1261                 ehdr->e_phentsize = sizeof(Elf_Phdr);
 1262                 ehdr->e_phnum = numsegs + 1;
 1263                 ehdr->e_shentsize = sizeof(Elf_Shdr);
 1264                 ehdr->e_shnum = 0;
 1265                 ehdr->e_shstrndx = SHN_UNDEF;
 1266 
 1267                 /*
 1268                  * Fill in the program header entries.
 1269                  */
 1270                 phdr = (Elf_Phdr *)((char *)dst + phoff);
 1271 
 1272                 /* The note segment. */
 1273                 phdr->p_type = PT_NOTE;
 1274                 phdr->p_offset = noteoff;
 1275                 phdr->p_vaddr = 0;
 1276                 phdr->p_paddr = 0;
 1277                 phdr->p_filesz = notesz;
 1278                 phdr->p_memsz = 0;
 1279                 phdr->p_flags = 0;
 1280                 phdr->p_align = 0;
 1281                 phdr++;
 1282 
 1283                 /* All the writable segments from the program. */
 1284                 phc.phdr = phdr;
 1285                 phc.offset = *off;
 1286                 each_writable_segment(td, cb_put_phdr, &phc);
 1287         }
 1288 }
 1289 
 1290 static void
 1291 __elfN(putnote)(void *dst, size_t *off, const char *name, int type,
 1292     const void *desc, size_t descsz)
 1293 {
 1294         Elf_Note note;
 1295 
 1296         note.n_namesz = strlen(name) + 1;
 1297         note.n_descsz = descsz;
 1298         note.n_type = type;
 1299         if (dst != NULL)
 1300                 bcopy(&note, (char *)dst + *off, sizeof note);
 1301         *off += sizeof note;
 1302         if (dst != NULL)
 1303                 bcopy(name, (char *)dst + *off, note.n_namesz);
 1304         *off += roundup2(note.n_namesz, sizeof(Elf_Size));
 1305         if (dst != NULL)
 1306                 bcopy(desc, (char *)dst + *off, note.n_descsz);
 1307         *off += roundup2(note.n_descsz, sizeof(Elf_Size));
 1308 }
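/*
 * Worked example for the note layout above: an NT_PRSTATUS note written by
 * puthdr() consists of the Elf_Note header, the name "FreeBSD" (8 bytes
 * including the NUL, already a multiple of sizeof(Elf_Size)), and the
 * prstatus_t descriptor.  n_descsz records the unpadded descriptor size,
 * but *off advances by the roundup2() amount, so a consumer walking the
 * note block must apply the same Elf_Size rounding.
 */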
 1309 
 1310 /*
 1311  * Tell kern_execve.c about it, with a little help from the linker.
 1312  */
 1313 static struct execsw __elfN(execsw) = {
 1314         __CONCAT(exec_, __elfN(imgact)),
 1315         __XSTRING(__CONCAT(ELF, __ELF_WORD_SIZE))
 1316 };
 1317 EXEC_SET(__CONCAT(elf, __ELF_WORD_SIZE), __elfN(execsw));

This page is part of the FreeBSD/Linux Kernel Cross-Reference, and was automatically generated using a modified version of the LXR engine.