The Design and Implementation of the FreeBSD Operating System, Second Edition
Now available: The Design and Implementation of the FreeBSD Operating System (Second Edition)


[ source navigation ] [ diff markup ] [ identifier search ] [ freetext search ] [ file search ] [ list types ] [ track identifier ]

FreeBSD/Linux Kernel Cross Reference
sys/kern/imgact_elf.c

Version: -  FREEBSD  -  FREEBSD-13-STABLE  -  FREEBSD-13-0  -  FREEBSD-12-STABLE  -  FREEBSD-12-0  -  FREEBSD-11-STABLE  -  FREEBSD-11-0  -  FREEBSD-10-STABLE  -  FREEBSD-10-0  -  FREEBSD-9-STABLE  -  FREEBSD-9-0  -  FREEBSD-8-STABLE  -  FREEBSD-8-0  -  FREEBSD-7-STABLE  -  FREEBSD-7-0  -  FREEBSD-6-STABLE  -  FREEBSD-6-0  -  FREEBSD-5-STABLE  -  FREEBSD-5-0  -  FREEBSD-4-STABLE  -  FREEBSD-3-STABLE  -  FREEBSD22  -  l41  -  OPENBSD  -  linux-2.6  -  MK84  -  PLAN9  -  xnu-8792 
SearchContext: -  none  -  3  -  10 

    1 /*-
    2  * Copyright (c) 2000 David O'Brien
    3  * Copyright (c) 1995-1996 Søren Schmidt
    4  * Copyright (c) 1996 Peter Wemm
    5  * All rights reserved.
    6  *
    7  * Redistribution and use in source and binary forms, with or without
    8  * modification, are permitted provided that the following conditions
    9  * are met:
   10  * 1. Redistributions of source code must retain the above copyright
   11  *    notice, this list of conditions and the following disclaimer
   12  *    in this position and unchanged.
   13  * 2. Redistributions in binary form must reproduce the above copyright
   14  *    notice, this list of conditions and the following disclaimer in the
   15  *    documentation and/or other materials provided with the distribution.
   16  * 3. The name of the author may not be used to endorse or promote products
   17  *    derived from this software without specific prior written permission
   18  *
   19  * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
   20  * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
   21  * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
   22  * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
   23  * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
   24  * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
   25  * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
   26  * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
   27  * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
   28  * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
   29  */
   30 
   31 #include <sys/cdefs.h>
   32 __FBSDID("$FreeBSD$");
   33 
   34 #include "opt_compat.h"
   35 
   36 #include <sys/param.h>
   37 #include <sys/exec.h>
   38 #include <sys/fcntl.h>
   39 #include <sys/imgact.h>
   40 #include <sys/imgact_elf.h>
   41 #include <sys/kernel.h>
   42 #include <sys/lock.h>
   43 #include <sys/malloc.h>
   44 #include <sys/mount.h>
   45 #include <sys/mutex.h>
   46 #include <sys/mman.h>
   47 #include <sys/namei.h>
   48 #include <sys/pioctl.h>
   49 #include <sys/proc.h>
   50 #include <sys/procfs.h>
   51 #include <sys/resourcevar.h>
   52 #include <sys/sf_buf.h>
   53 #include <sys/systm.h>
   54 #include <sys/signalvar.h>
   55 #include <sys/stat.h>
   56 #include <sys/sx.h>
   57 #include <sys/syscall.h>
   58 #include <sys/sysctl.h>
   59 #include <sys/sysent.h>
   60 #include <sys/vnode.h>
   61 
   62 #include <vm/vm.h>
   63 #include <vm/vm_kern.h>
   64 #include <vm/vm_param.h>
   65 #include <vm/pmap.h>
   66 #include <vm/vm_map.h>
   67 #include <vm/vm_object.h>
   68 #include <vm/vm_extern.h>
   69 
   70 #include <machine/elf.h>
   71 #include <machine/md_var.h>
   72 
      /* Offset into e_ident[] of the pre-EI_OSABI FreeBSD 3.x string brand. */
   73 #define OLD_EI_BRAND    8
   74 
   75 static int __elfN(check_header)(const Elf_Ehdr *hdr);
   76 static Elf_Brandinfo *__elfN(get_brandinfo)(struct image_params *imgp,
   77     const char *interp, int interp_name_len, int32_t *osrel);
   78 static int __elfN(load_file)(struct proc *p, const char *file, u_long *addr,
   79     u_long *entry, size_t pagesize);
   80 static int __elfN(load_section)(struct vmspace *vmspace, vm_object_t object,
   81     vm_offset_t offset, caddr_t vmaddr, size_t memsz, size_t filsz,
   82     vm_prot_t prot, size_t pagesize);
   83 static int __CONCAT(exec_, __elfN(imgact))(struct image_params *imgp);
   84 static boolean_t __elfN(freebsd_trans_osrel)(const Elf_Note *note,
   85     int32_t *osrel);
   86 static boolean_t kfreebsd_trans_osrel(const Elf_Note *note, int32_t *osrel);
   87 static boolean_t __elfN(check_note)(struct image_params *imgp,
   88     Elf_Brandnote *checknote, int32_t *osrel);
   89 static vm_prot_t __elfN(trans_prot)(Elf_Word);
   90 static Elf_Word __elfN(untrans_prot)(vm_prot_t);
   91 
   92 SYSCTL_NODE(_kern, OID_AUTO, __CONCAT(elf, __ELF_WORD_SIZE), CTLFLAG_RW, 0,
   93     "");
   94 
      /*
       * ELF brand of last resort, consulted by __elfN(get_brandinfo)() when
       * no note, e_ident brand, or interpreter path matches.  -1 disables
       * the fallback.  Settable via sysctl and as a loader tunable.
       */
   95 int __elfN(fallback_brand) = -1;
   96 SYSCTL_INT(__CONCAT(_kern_elf, __ELF_WORD_SIZE), OID_AUTO,
   97     fallback_brand, CTLFLAG_RW, &__elfN(fallback_brand), 0,
   98     __XSTRING(__CONCAT(ELF, __ELF_WORD_SIZE)) " brand of last resort");
   99 TUNABLE_INT("kern.elf" __XSTRING(__ELF_WORD_SIZE) ".fallback_brand",
  100     &__elfN(fallback_brand));
  101 
      /*
       * NOTE(review): judging by the name, this flag selects an older core
       * dump layout in the coredump path (not visible in this chunk) —
       * confirm against the coredump code before relying on this.
       */
  102 static int elf_legacy_coredump = 0;
  103 SYSCTL_INT(_debug, OID_AUTO, __elfN(legacy_coredump), CTLFLAG_RW, 
  104     &elf_legacy_coredump, 0, "");
  105 
      /*
       * For 32-bit i386 images on amd64/ia64 hosts: optionally make
       * readable segments executable (kern.elf32.read_exec).
       */
  106 #if __ELF_WORD_SIZE == 32
  107 #if defined(__amd64__) || defined(__ia64__)
  108 int i386_read_exec = 0;
  109 SYSCTL_INT(_kern_elf32, OID_AUTO, read_exec, CTLFLAG_RW, &i386_read_exec, 0,
  110     "enable execution from readable segments");
  111 #endif
  112 #endif
  113 
      /* Registered ELF brands; slots are claimed by __elfN(insert_brand_entry)(). */
  114 static Elf_Brandinfo *elf_brand_list[MAX_BRANDS];
  115 
      /* Round "va" down/up to a multiple of "ps" (must be a power of two). */
  116 #define trunc_page_ps(va, ps)   ((va) & ~(ps - 1))
  117 #define round_page_ps(va, ps)   (((va) + (ps - 1)) & ~(ps - 1))
      /* Does "a" have the natural alignment of type "t"? */
  118 #define aligned(a, t)   (trunc_page_ps((u_long)(a), sizeof(t)) == (u_long)(a))
  120 static const char FREEBSD_ABI_VENDOR[] = "FreeBSD";
  121 
      /*
       * Template for the native FreeBSD ".note.ABI-tag" note: vendor
       * "FreeBSD" with a 4-byte descriptor holding the osrel value,
       * decoded by __elfN(freebsd_trans_osrel)().
       */
  122 Elf_Brandnote __elfN(freebsd_brandnote) = {
  123         .hdr.n_namesz   = sizeof(FREEBSD_ABI_VENDOR),
  124         .hdr.n_descsz   = sizeof(int32_t),
  125         .hdr.n_type     = 1,
  126         .vendor         = FREEBSD_ABI_VENDOR,
  127         .flags          = BN_TRANSLATE_OSREL,
  128         .trans_osrel    = __elfN(freebsd_trans_osrel)
  129 };
  130 
  131 static boolean_t
  132 __elfN(freebsd_trans_osrel)(const Elf_Note *note, int32_t *osrel)
  133 {
  134         uintptr_t p;
  135 
  136         p = (uintptr_t)(note + 1);
  137         p += roundup2(note->n_namesz, sizeof(Elf32_Addr));
  138         *osrel = *(const int32_t *)(p);
  139 
  140         return (TRUE);
  141 }
  142 
  143 static const char GNU_ABI_VENDOR[] = "GNU";
  144 static int GNU_KFREEBSD_ABI_DESC = 3;
  145 
      /*
       * Template for the Debian GNU/kFreeBSD ABI note: vendor "GNU",
       * descriptor decoded into an osrel by kfreebsd_trans_osrel().
       */
  146 Elf_Brandnote __elfN(kfreebsd_brandnote) = {
  147         .hdr.n_namesz   = sizeof(GNU_ABI_VENDOR),
  148         .hdr.n_descsz   = 16,   /* XXX at least 16 */
  149         .hdr.n_type     = 1,
  150         .vendor         = GNU_ABI_VENDOR,
  151         .flags          = BN_TRANSLATE_OSREL,
  152         .trans_osrel    = kfreebsd_trans_osrel
  153 };
  154 
  155 static boolean_t
  156 kfreebsd_trans_osrel(const Elf_Note *note, int32_t *osrel)
  157 {
  158         const Elf32_Word *desc;
  159         uintptr_t p;
  160 
  161         p = (uintptr_t)(note + 1);
  162         p += roundup2(note->n_namesz, sizeof(Elf32_Addr));
  163 
  164         desc = (const Elf32_Word *)p;
  165         if (desc[0] != GNU_KFREEBSD_ABI_DESC)
  166                 return (FALSE);
  167 
  168         /*
  169          * Debian GNU/kFreeBSD embed the earliest compatible kernel version
  170          * (__FreeBSD_version: <major><two digit minor>Rxx) in the LSB way.
  171          */
  172         *osrel = desc[1] * 100000 + desc[2] * 1000 + desc[3];
  173 
  174         return (TRUE);
  175 }
  176 
  177 int
  178 __elfN(insert_brand_entry)(Elf_Brandinfo *entry)
  179 {
  180         int i;
  181 
  182         for (i = 0; i < MAX_BRANDS; i++) {
  183                 if (elf_brand_list[i] == NULL) {
  184                         elf_brand_list[i] = entry;
  185                         break;
  186                 }
  187         }
  188         if (i == MAX_BRANDS) {
  189                 printf("WARNING: %s: could not insert brandinfo entry: %p\n",
  190                         __func__, entry);
  191                 return (-1);
  192         }
  193         return (0);
  194 }
  195 
  196 int
  197 __elfN(remove_brand_entry)(Elf_Brandinfo *entry)
  198 {
  199         int i;
  200 
  201         for (i = 0; i < MAX_BRANDS; i++) {
  202                 if (elf_brand_list[i] == entry) {
  203                         elf_brand_list[i] = NULL;
  204                         break;
  205                 }
  206         }
  207         if (i == MAX_BRANDS)
  208                 return (-1);
  209         return (0);
  210 }
  211 
  212 int
  213 __elfN(brand_inuse)(Elf_Brandinfo *entry)
  214 {
  215         struct proc *p;
  216         int rval = FALSE;
  217 
  218         sx_slock(&allproc_lock);
  219         FOREACH_PROC_IN_SYSTEM(p) {
  220                 if (p->p_sysent == entry->sysvec) {
  221                         rval = TRUE;
  222                         break;
  223                 }
  224         }
  225         sx_sunlock(&allproc_lock);
  226 
  227         return (rval);
  228 }
  229 
      /*
       * Resolve the Elf_Brandinfo that should execute the image.
       * Matching is attempted in priority order: (1) a brand ABI note,
       * (2) the EI_OSABI byte or the old FreeBSD 3.x string brand in
       * e_ident, (3) the PT_INTERP interpreter path, and finally (4) the
       * kern.elf<N>.fallback_brand setting.  *osrel may be filled in by
       * note parsing.  Returns NULL when no brand accepts the image.
       * Brands flagged BI_BRAND_NOTE_MANDATORY can only match via their
       * note, so stages (2)-(4) skip them.
       */
  230 static Elf_Brandinfo *
  231 __elfN(get_brandinfo)(struct image_params *imgp, const char *interp,
  232     int interp_name_len, int32_t *osrel)
  233 {
  234         const Elf_Ehdr *hdr = (const Elf_Ehdr *)imgp->image_header;
  235         Elf_Brandinfo *bi;
  236         boolean_t ret;
  237         int i;
  238 
  239         /*
  240          * We support four types of branding -- (1) the ELF EI_OSABI field
  241          * that SCO added to the ELF spec, (2) FreeBSD 3.x's traditional string
  242          * branding w/in the ELF header, (3) path of the `interp_path'
  243          * field, and (4) the ".note.ABI-tag" ELF section.
  244          */
  245 
  246         /* Look for an ".note.ABI-tag" ELF section */
  247         for (i = 0; i < MAX_BRANDS; i++) {
  248                 bi = elf_brand_list[i];
  249                 if (bi == NULL)
  250                         continue;
  251                 if (hdr->e_machine == bi->machine && (bi->flags &
  252                     (BI_BRAND_NOTE|BI_BRAND_NOTE_MANDATORY)) != 0) {
  253                         ret = __elfN(check_note)(imgp, bi->brand_note, osrel);
  254                         if (ret)
  255                                 return (bi);
  256                 }
  257         }
  258 
  259         /* If the executable has a brand, search for it in the brand list. */
  260         for (i = 0; i < MAX_BRANDS; i++) {
  261                 bi = elf_brand_list[i];
  262                 if (bi == NULL || bi->flags & BI_BRAND_NOTE_MANDATORY)
  263                         continue;
  264                 if (hdr->e_machine == bi->machine &&
  265                     (hdr->e_ident[EI_OSABI] == bi->brand ||
  266                     strncmp((const char *)&hdr->e_ident[OLD_EI_BRAND],
  267                     bi->compat_3_brand, strlen(bi->compat_3_brand)) == 0))
  268                         return (bi);
  269         }
  270 
  271         /* Lacking a known brand, search for a recognized interpreter. */
  272         if (interp != NULL) {
  273                 for (i = 0; i < MAX_BRANDS; i++) {
  274                         bi = elf_brand_list[i];
  275                         if (bi == NULL || bi->flags & BI_BRAND_NOTE_MANDATORY)
  276                                 continue;
  277                         if (hdr->e_machine == bi->machine &&
  278                             /* ELF image p_filesz includes terminating zero */
  279                             strlen(bi->interp_path) + 1 == interp_name_len &&
  280                             strncmp(interp, bi->interp_path, interp_name_len)
  281                             == 0)
  282                                 return (bi);
  283                 }
  284         }
  285 
  286         /* Lacking a recognized interpreter, try the default brand */
  287         for (i = 0; i < MAX_BRANDS; i++) {
  288                 bi = elf_brand_list[i];
  289                 if (bi == NULL || bi->flags & BI_BRAND_NOTE_MANDATORY)
  290                         continue;
  291                 if (hdr->e_machine == bi->machine &&
  292                     __elfN(fallback_brand) == bi->brand)
  293                         return (bi);
  294         }
  295         return (NULL);
  296 }
  297 
  298 static int
  299 __elfN(check_header)(const Elf_Ehdr *hdr)
  300 {
  301         Elf_Brandinfo *bi;
  302         int i;
  303 
  304         if (!IS_ELF(*hdr) ||
  305             hdr->e_ident[EI_CLASS] != ELF_TARG_CLASS ||
  306             hdr->e_ident[EI_DATA] != ELF_TARG_DATA ||
  307             hdr->e_ident[EI_VERSION] != EV_CURRENT ||
  308             hdr->e_phentsize != sizeof(Elf_Phdr) ||
  309             hdr->e_version != ELF_TARG_VER)
  310                 return (ENOEXEC);
  311 
  312         /*
  313          * Make sure we have at least one brand for this machine.
  314          */
  315 
  316         for (i = 0; i < MAX_BRANDS; i++) {
  317                 bi = elf_brand_list[i];
  318                 if (bi != NULL && bi->machine == hdr->e_machine)
  319                         break;
  320         }
  321         if (i == MAX_BRANDS)
  322                 return (ENOEXEC);
  323 
  324         return (0);
  325 }
  326 
      /*
       * Map the partial (non page aligned) range [start, end) of an ELF
       * segment into "map" by copying rather than mapping: an anonymous
       * page is inserted to back the range, then the bytes are copied
       * out of "object" at "offset".  Returns KERN_SUCCESS or
       * KERN_FAILURE.
       *
       * NOTE(review): the "prot" parameter is accepted but never used;
       * the anonymous insertion is done with VM_PROT_ALL.  Presumably the
       * caller applies the final protection afterwards — confirm against
       * __elfN(load_section)()'s vm_map_protect() call.
       */
  327 static int
  328 __elfN(map_partial)(vm_map_t map, vm_object_t object, vm_ooffset_t offset,
  329     vm_offset_t start, vm_offset_t end, vm_prot_t prot)
  330 {
  331         struct sf_buf *sf;
  332         int error;
  333         vm_offset_t off;
  334 
  335         /*
  336          * Create the page if it doesn't exist yet. Ignore errors.
  337          */
  338         vm_map_lock(map);
  339         vm_map_insert(map, NULL, 0, trunc_page(start), round_page(end),
  340             VM_PROT_ALL, VM_PROT_ALL, 0);
  341         vm_map_unlock(map);
  342 
  343         /*
  344          * Find the page from the underlying object.
  345          */
  346         if (object) {
  347                 sf = vm_imgact_map_page(object, offset);
  348                 if (sf == NULL)
  349                         return (KERN_FAILURE);
  350                 off = offset - trunc_page(offset);
  351                 error = copyout((caddr_t)sf_buf_kva(sf) + off, (caddr_t)start,
  352                     end - start);
  353                 vm_imgact_unmap_page(sf);
  354                 if (error) {
  355                         return (KERN_FAILURE);
  356                 }
  357         }
  358 
  359         return (KERN_SUCCESS);
  360 }
  361 
      /*
       * Insert the file range of "object" starting at "offset" into "map"
       * at [start, end) with protection "prot".  Unaligned head and tail
       * fragments are copied via __elfN(map_partial)(); a page-unaligned
       * file offset forces the whole range to be copied into anonymous
       * memory, otherwise the object's pages are mapped directly with the
       * given copy-on-write flags.  Returns a KERN_* status.
       */
  362 static int
  363 __elfN(map_insert)(vm_map_t map, vm_object_t object, vm_ooffset_t offset,
  364     vm_offset_t start, vm_offset_t end, vm_prot_t prot, int cow)
  365 {
  366         struct sf_buf *sf;
  367         vm_offset_t off;
  368         vm_size_t sz;
  369         int error, rv;
  370 
              /* Copy any unaligned head fragment, then advance to a page boundary. */
  371         if (start != trunc_page(start)) {
  372                 rv = __elfN(map_partial)(map, object, offset, start,
  373                     round_page(start), prot);
  374                 if (rv)
  375                         return (rv);
  376                 offset += round_page(start) - start;
  377                 start = round_page(start);
  378         }
              /* Likewise copy any unaligned tail fragment. */
  379         if (end != round_page(end)) {
  380                 rv = __elfN(map_partial)(map, object, offset +
  381                     trunc_page(end) - start, trunc_page(end), end, prot);
  382                 if (rv)
  383                         return (rv);
  384                 end = trunc_page(end);
  385         }
  386         if (end > start) {
  387                 if (offset & PAGE_MASK) {
  388                         /*
  389                          * The mapping is not page aligned. This means we have
  390                          * to copy the data. Sigh.
  391                          */
  392                         rv = vm_map_find(map, NULL, 0, &start, end - start,
  393                             FALSE, prot | VM_PROT_WRITE, VM_PROT_ALL, 0);
  394                         if (rv)
  395                                 return (rv);
  396                         if (object == NULL)
  397                                 return (KERN_SUCCESS);
  398                         for (; start < end; start += sz) {
  399                                 sf = vm_imgact_map_page(object, offset);
  400                                 if (sf == NULL)
  401                                         return (KERN_FAILURE);
  402                                 off = offset - trunc_page(offset);
  403                                 sz = end - start;
  404                                 if (sz > PAGE_SIZE - off)
  405                                         sz = PAGE_SIZE - off;
  406                                 error = copyout((caddr_t)sf_buf_kva(sf) + off,
  407                                     (caddr_t)start, sz);
  408                                 vm_imgact_unmap_page(sf);
  409                                 if (error) {
  410                                         return (KERN_FAILURE);
  411                                 }
  412                                 offset += sz;
  413                         }
  414                         rv = KERN_SUCCESS;
  415                 } else {
  416                         vm_object_reference(object);
  417                         vm_map_lock(map);
  418                         rv = vm_map_insert(map, object, offset, start, end,
  419                             prot, VM_PROT_ALL, cow);
  420                         vm_map_unlock(map);
                              /* vm_map_insert() consumed the reference only on success. */
  421                         if (rv != KERN_SUCCESS)
  422                                 vm_object_deallocate(object);
  423                 }
  424                 return (rv);
  425         } else {
  426                 return (KERN_SUCCESS);
  427         }
  428 }
  429 
      /*
       * Map one loadable ELF segment into "vmspace".  The file-backed part
       * ([offset, offset + filsz) of "object") is inserted via
       * __elfN(map_insert)(); when memsz > filsz the remaining bss is
       * backed by anonymous zeroed memory, and the partial page straddling
       * the file/bss boundary is copied by hand.  "pagesize" is the
       * alignment granularity for the mapping (may differ from PAGE_SIZE;
       * see trunc_page_ps()/round_page_ps()).  Returns 0 or an errno.
       */
  430 static int
  431 __elfN(load_section)(struct vmspace *vmspace,
  432         vm_object_t object, vm_offset_t offset,
  433         caddr_t vmaddr, size_t memsz, size_t filsz, vm_prot_t prot,
  434         size_t pagesize)
  435 {
  436         struct sf_buf *sf;
  437         size_t map_len;
  438         vm_offset_t map_addr;
  439         int error, rv, cow;
  440         size_t copy_len;
  441         vm_offset_t file_addr;
  442 
  443         /*
  444          * It's necessary to fail if the filsz + offset taken from the
  445          * header is greater than the actual file pager object's size.
  446          * If we were to allow this, then the vm_map_find() below would
  447          * walk right off the end of the file object and into the ether.
  448          *
  449          * While I'm here, might as well check for something else that
  450          * is invalid: filsz cannot be greater than memsz.
  451          */
  452         if ((off_t)filsz + offset > object->un_pager.vnp.vnp_size ||
  453             filsz > memsz) {
  454                 uprintf("elf_load_section: truncated ELF file\n");
  455                 return (ENOEXEC);
  456         }
  457 
  458         map_addr = trunc_page_ps((vm_offset_t)vmaddr, pagesize);
  459         file_addr = trunc_page_ps(offset, pagesize);
  460 
  461         /*
  462          * We have two choices.  We can either clear the data in the last page
  463          * of an oversized mapping, or we can start the anon mapping a page
  464          * early and copy the initialized data into that first page.  We
  465          * choose the second..
  466          */
  467         if (memsz > filsz)
  468                 map_len = trunc_page_ps(offset + filsz, pagesize) - file_addr;
  469         else
  470                 map_len = round_page_ps(offset + filsz, pagesize) - file_addr;
  471 
  472         if (map_len != 0) {
  473                 /* cow flags: don't dump readonly sections in core */
  474                 cow = MAP_COPY_ON_WRITE | MAP_PREFAULT |
  475                     (prot & VM_PROT_WRITE ? 0 : MAP_DISABLE_COREDUMP);
  476 
  477                 rv = __elfN(map_insert)(&vmspace->vm_map,
  478                                       object,
  479                                       file_addr,        /* file offset */
  480                                       map_addr,         /* virtual start */
  481                                       map_addr + map_len,/* virtual end */
  482                                       prot,
  483                                       cow);
  484                 if (rv != KERN_SUCCESS)
  485                         return (EINVAL);
  486 
  487                 /* we can stop now if we've covered it all */
  488                 if (memsz == filsz) {
  489                         return (0);
  490                 }
  491         }
  492 
  493 
  494         /*
  495          * We have to get the remaining bit of the file into the first part
  496          * of the oversized map segment.  This is normally because the .data
  497          * segment in the file is extended to provide bss.  It's a neat idea
  498          * to try and save a page, but it's a pain in the behind to implement.
  499          */
  500         copy_len = (offset + filsz) - trunc_page_ps(offset + filsz, pagesize);
  501         map_addr = trunc_page_ps((vm_offset_t)vmaddr + filsz, pagesize);
  502         map_len = round_page_ps((vm_offset_t)vmaddr + memsz, pagesize) -
  503             map_addr;
  504 
  505         /* This had damn well better be true! */
  506         if (map_len != 0) {
                      /* Anonymous zero-fill for the bss portion. */
  507                 rv = __elfN(map_insert)(&vmspace->vm_map, NULL, 0, map_addr,
  508                     map_addr + map_len, VM_PROT_ALL, 0);
  509                 if (rv != KERN_SUCCESS) {
  510                         return (EINVAL);
  511                 }
  512         }
  513 
  514         if (copy_len != 0) {
  515                 vm_offset_t off;
  516 
  517                 sf = vm_imgact_map_page(object, offset + filsz);
  518                 if (sf == NULL)
  519                         return (EIO);
  520 
  521                 /* send the page fragment to user space */
  522                 off = trunc_page_ps(offset + filsz, pagesize) -
  523                     trunc_page(offset + filsz);
  524                 error = copyout((caddr_t)sf_buf_kva(sf) + off,
  525                     (caddr_t)map_addr, copy_len);
  526                 vm_imgact_unmap_page(sf);
  527                 if (error) {
  528                         return (error);
  529                 }
  530         }
  531 
  532         /*
  533          * set it to the specified protection.
  534          * XXX had better undo the damage from pasting over the cracks here!
  535          */
  536         vm_map_protect(&vmspace->vm_map, trunc_page(map_addr),
  537             round_page(map_addr + map_len),  prot, FALSE);
  538 
  539         return (0);
  540 }
  541 
  542 /*
  543  * Load the file "file" into memory.  It may be either a shared object
  544  * or an executable.
  545  *
  546  * The "addr" reference parameter is in/out.  On entry, it specifies
  547  * the address where a shared object should be loaded.  If the file is
  548  * an executable, this value is ignored.  On exit, "addr" specifies
  549  * where the file was actually loaded.
  550  *
  551  * The "entry" reference parameter is out only.  On exit, it specifies
  552  * the entry point for the loaded file.
  553  */
  554 static int
  555 __elfN(load_file)(struct proc *p, const char *file, u_long *addr,
  556         u_long *entry, size_t pagesize)
  557 {
  558         struct {
  559                 struct nameidata nd;
  560                 struct vattr attr;
  561                 struct image_params image_params;
  562         } *tempdata;
  563         const Elf_Ehdr *hdr = NULL;
  564         const Elf_Phdr *phdr = NULL;
  565         struct nameidata *nd;
  566         struct vmspace *vmspace = p->p_vmspace;
  567         struct vattr *attr;
  568         struct image_params *imgp;
  569         vm_prot_t prot;
  570         u_long rbase;
  571         u_long base_addr = 0;
  572         int vfslocked, error, i, numsegs;
  573 
              /*
               * The lookup/attribute/image_params scratch state lives on the
               * heap (freed at "fail"), presumably to keep this deep exec
               * path off the kernel stack — confirm.
               */
  574         tempdata = malloc(sizeof(*tempdata), M_TEMP, M_WAITOK);
  575         nd = &tempdata->nd;
  576         attr = &tempdata->attr;
  577         imgp = &tempdata->image_params;
  578 
  579         /*
  580          * Initialize part of the common data
  581          */
  582         imgp->proc = p;
  583         imgp->attr = attr;
  584         imgp->firstpage = NULL;
  585         imgp->image_header = NULL;
  586         imgp->object = NULL;
  587         imgp->execlabel = NULL;
  588 
  589         NDINIT(nd, LOOKUP, MPSAFE|LOCKLEAF|FOLLOW, UIO_SYSSPACE, file,
  590             curthread);
  591         vfslocked = 0;
  592         if ((error = namei(nd)) != 0) {
  593                 nd->ni_vp = NULL;
  594                 goto fail;
  595         }
  596         vfslocked = NDHASGIANT(nd);
  597         NDFREE(nd, NDF_ONLY_PNBUF);
  598         imgp->vp = nd->ni_vp;
  599 
  600         /*
  601          * Check permissions, modes, uid, etc on the file, and "open" it.
  602          */
  603         error = exec_check_permissions(imgp);
  604         if (error)
  605                 goto fail;
  606 
  607         error = exec_map_first_page(imgp);
  608         if (error)
  609                 goto fail;
  610 
  611         /*
  612          * Also make certain that the interpreter stays the same, so set
  613          * its VV_TEXT flag, too.
  614          */
  615         nd->ni_vp->v_vflag |= VV_TEXT;
  616 
  617         imgp->object = nd->ni_vp->v_object;
  618 
  619         hdr = (const Elf_Ehdr *)imgp->image_header;
  620         if ((error = __elfN(check_header)(hdr)) != 0)
  621                 goto fail;
              /* ET_DYN objects are relocated by *addr; ET_EXEC load at p_vaddr. */
  622         if (hdr->e_type == ET_DYN)
  623                 rbase = *addr;
  624         else if (hdr->e_type == ET_EXEC)
  625                 rbase = 0;
  626         else {
  627                 error = ENOEXEC;
  628                 goto fail;
  629         }
  630 
  631         /* Only support headers that fit within first page for now      */
  632         if ((hdr->e_phoff > PAGE_SIZE) ||
  633             (u_int)hdr->e_phentsize * hdr->e_phnum > PAGE_SIZE - hdr->e_phoff) {
  634                 error = ENOEXEC;
  635                 goto fail;
  636         }
  637 
  638         phdr = (const Elf_Phdr *)(imgp->image_header + hdr->e_phoff);
  639         if (!aligned(phdr, Elf_Addr)) {
  640                 error = ENOEXEC;
  641                 goto fail;
  642         }
  643 
  644         for (i = 0, numsegs = 0; i < hdr->e_phnum; i++) {
  645                 if (phdr[i].p_type == PT_LOAD && phdr[i].p_memsz != 0) {
  646                         /* Loadable segment */
  647                         prot = __elfN(trans_prot)(phdr[i].p_flags);
  648                         if ((error = __elfN(load_section)(vmspace,
  649                             imgp->object, phdr[i].p_offset,
  650                             (caddr_t)(uintptr_t)phdr[i].p_vaddr + rbase,
  651                             phdr[i].p_memsz, phdr[i].p_filesz, prot,
  652                             pagesize)) != 0)
  653                                 goto fail;
  654                         /*
  655                          * Establish the base address if this is the
  656                          * first segment.
  657                          */
  658                         if (numsegs == 0)
  659                                 base_addr = trunc_page(phdr[i].p_vaddr +
  660                                     rbase);
  661                         numsegs++;
  662                 }
  663         }
  664         *addr = base_addr;
  665         *entry = (unsigned long)hdr->e_entry + rbase;
  666 
      /*
       * Common exit path: unmap the first page if mapped, release the
       * locked vnode reference obtained by namei() (LOCKLEAF), drop Giant
       * if it was acquired, and free the scratch buffer.  "error" is 0
       * on success.
       */
  667 fail:
  668         if (imgp->firstpage)
  669                 exec_unmap_first_page(imgp);
  670 
  671         if (nd->ni_vp)
  672                 vput(nd->ni_vp);
  673 
  674         VFS_UNLOCK_GIANT(vfslocked);
  675         free(tempdata, M_TEMP);
  676 
  677         return (error);
  678 }
  679 
  680 static int
  681 __CONCAT(exec_, __elfN(imgact))(struct image_params *imgp)
  682 {
  683         const Elf_Ehdr *hdr = (const Elf_Ehdr *)imgp->image_header;
  684         const Elf_Phdr *phdr;
  685         Elf_Auxargs *elf_auxargs;
  686         struct vmspace *vmspace;
  687         vm_prot_t prot;
  688         u_long text_size = 0, data_size = 0, total_size = 0;
  689         u_long text_addr = 0, data_addr = 0;
  690         u_long seg_size, seg_addr;
  691         u_long addr, baddr, et_dyn_addr, entry = 0, proghdr = 0;
  692         int32_t osrel = 0;
  693         int error = 0, i, n, interp_name_len = 0;
  694         const char *interp = NULL, *newinterp = NULL;
  695         Elf_Brandinfo *brand_info;
  696         char *path;
  697         struct sysentvec *sv;
  698 
  699         /*
  700          * Do we have a valid ELF header ?
  701          *
  702          * Only allow ET_EXEC & ET_DYN here, reject ET_DYN later
  703          * if particular brand doesn't support it.
  704          */
  705         if (__elfN(check_header)(hdr) != 0 ||
  706             (hdr->e_type != ET_EXEC && hdr->e_type != ET_DYN))
  707                 return (-1);
  708 
  709         /*
  710          * From here on down, we return an errno, not -1, as we've
  711          * detected an ELF file.
  712          */
  713 
  714         if ((hdr->e_phoff > PAGE_SIZE) ||
  715             (u_int)hdr->e_phentsize * hdr->e_phnum > PAGE_SIZE - hdr->e_phoff) {
  716                 /* Only support headers in first page for now */
  717                 return (ENOEXEC);
  718         }
  719         phdr = (const Elf_Phdr *)(imgp->image_header + hdr->e_phoff);
  720         if (!aligned(phdr, Elf_Addr))
  721                 return (ENOEXEC);
  722         n = 0;
  723         baddr = 0;
  724         for (i = 0; i < hdr->e_phnum; i++) {
  725                 if (phdr[i].p_type == PT_LOAD) {
  726                         if (n == 0)
  727                                 baddr = phdr[i].p_vaddr;
  728                         n++;
  729                         continue;
  730                 }
  731                 if (phdr[i].p_type == PT_INTERP) {
  732                         /* Path to interpreter */
  733                         if (phdr[i].p_filesz > MAXPATHLEN ||
  734                             phdr[i].p_offset > PAGE_SIZE ||
  735                             phdr[i].p_filesz > PAGE_SIZE - phdr[i].p_offset)
  736                                 return (ENOEXEC);
  737                         interp = imgp->image_header + phdr[i].p_offset;
  738                         interp_name_len = phdr[i].p_filesz;
  739                         continue;
  740                 }
  741         }
  742 
  743         brand_info = __elfN(get_brandinfo)(imgp, interp, interp_name_len,
  744             &osrel);
  745         if (brand_info == NULL) {
  746                 uprintf("ELF binary type \"%u\" not known.\n",
  747                     hdr->e_ident[EI_OSABI]);
  748                 return (ENOEXEC);
  749         }
  750         if (hdr->e_type == ET_DYN) {
  751                 if ((brand_info->flags & BI_CAN_EXEC_DYN) == 0)
  752                         return (ENOEXEC);
  753                 /*
  754                  * Honour the base load address from the dso if it is
  755                  * non-zero for some reason.
  756                  */
  757                 if (baddr == 0)
  758                         et_dyn_addr = ET_DYN_LOAD_ADDR;
  759                 else
  760                         et_dyn_addr = 0;
  761         } else
  762                 et_dyn_addr = 0;
  763         sv = brand_info->sysvec;
  764         if (interp != NULL && brand_info->interp_newpath != NULL)
  765                 newinterp = brand_info->interp_newpath;
  766 
  767         /*
  768          * Avoid a possible deadlock if the current address space is destroyed
  769          * and that address space maps the locked vnode.  In the common case,
  770          * the locked vnode's v_usecount is decremented but remains greater
  771          * than zero.  Consequently, the vnode lock is not needed by vrele().
  772          * However, in cases where the vnode lock is external, such as nullfs,
  773          * v_usecount may become zero.
  774          */
  775         VOP_UNLOCK(imgp->vp, 0);
  776 
  777         error = exec_new_vmspace(imgp, sv);
  778         imgp->proc->p_sysent = sv;
  779 
  780         vn_lock(imgp->vp, LK_EXCLUSIVE | LK_RETRY);
  781         if (error)
  782                 return (error);
  783 
  784         vmspace = imgp->proc->p_vmspace;
  785 
  786         for (i = 0; i < hdr->e_phnum; i++) {
  787                 switch (phdr[i].p_type) {
  788                 case PT_LOAD:   /* Loadable segment */
  789                         if (phdr[i].p_memsz == 0)
  790                                 break;
  791                         prot = __elfN(trans_prot)(phdr[i].p_flags);
  792 
  793 #if defined(__ia64__) && __ELF_WORD_SIZE == 32 && defined(IA32_ME_HARDER)
  794                         /*
  795                          * Some x86 binaries assume read == executable,
  796                          * notably the M3 runtime and therefore cvsup
  797                          */
  798                         if (prot & VM_PROT_READ)
  799                                 prot |= VM_PROT_EXECUTE;
  800 #endif
  801 
  802                         if ((error = __elfN(load_section)(vmspace,
  803                             imgp->object, phdr[i].p_offset,
  804                             (caddr_t)(uintptr_t)phdr[i].p_vaddr + et_dyn_addr,
  805                             phdr[i].p_memsz, phdr[i].p_filesz, prot,
  806                             sv->sv_pagesize)) != 0)
  807                                 return (error);
  808 
  809                         /*
  810                          * If this segment contains the program headers,
  811                          * remember their virtual address for the AT_PHDR
  812                          * aux entry. Static binaries don't usually include
  813                          * a PT_PHDR entry.
  814                          */
  815                         if (phdr[i].p_offset == 0 &&
  816                             hdr->e_phoff + hdr->e_phnum * hdr->e_phentsize
  817                                 <= phdr[i].p_filesz)
  818                                 proghdr = phdr[i].p_vaddr + hdr->e_phoff +
  819                                     et_dyn_addr;
  820 
  821                         seg_addr = trunc_page(phdr[i].p_vaddr + et_dyn_addr);
  822                         seg_size = round_page(phdr[i].p_memsz +
  823                             phdr[i].p_vaddr + et_dyn_addr - seg_addr);
  824 
  825                         /*
  826                          * Is this .text or .data?  We can't use
  827                          * VM_PROT_WRITE or VM_PROT_EXEC, it breaks the
  828                          * alpha terribly and possibly does other bad
  829                          * things so we stick to the old way of figuring
  830                          * it out:  If the segment contains the program
  831                          * entry point, it's a text segment, otherwise it
  832                          * is a data segment.
  833                          *
  834                          * Note that obreak() assumes that data_addr + 
  835                          * data_size == end of data load area, and the ELF
  836                          * file format expects segments to be sorted by
  837                          * address.  If multiple data segments exist, the
  838                          * last one will be used.
  839                          */
  840                         if (hdr->e_entry >= phdr[i].p_vaddr &&
  841                             hdr->e_entry < (phdr[i].p_vaddr +
  842                             phdr[i].p_memsz)) {
  843                                 text_size = seg_size;
  844                                 text_addr = seg_addr;
  845                                 entry = (u_long)hdr->e_entry + et_dyn_addr;
  846                         } else {
  847                                 data_size = seg_size;
  848                                 data_addr = seg_addr;
  849                         }
  850                         total_size += seg_size;
  851                         break;
  852                 case PT_PHDR:   /* Program header table info */
  853                         proghdr = phdr[i].p_vaddr + et_dyn_addr;
  854                         break;
  855                 default:
  856                         break;
  857                 }
  858         }
  859         
  860         if (data_addr == 0 && data_size == 0) {
  861                 data_addr = text_addr;
  862                 data_size = text_size;
  863         }
  864 
  865         /*
  866          * Check limits.  It should be safe to check the
  867          * limits after loading the segments since we do
  868          * not actually fault in all the segments pages.
  869          */
  870         PROC_LOCK(imgp->proc);
  871         if (data_size > lim_cur(imgp->proc, RLIMIT_DATA) ||
  872             text_size > maxtsiz ||
  873             total_size > lim_cur(imgp->proc, RLIMIT_VMEM)) {
  874                 PROC_UNLOCK(imgp->proc);
  875                 return (ENOMEM);
  876         }
  877 
  878         vmspace->vm_tsize = text_size >> PAGE_SHIFT;
  879         vmspace->vm_taddr = (caddr_t)(uintptr_t)text_addr;
  880         vmspace->vm_dsize = data_size >> PAGE_SHIFT;
  881         vmspace->vm_daddr = (caddr_t)(uintptr_t)data_addr;
  882 
  883         /*
  884          * We load the dynamic linker where a userland call
  885          * to mmap(0, ...) would put it.  The rationale behind this
  886          * calculation is that it leaves room for the heap to grow to
  887          * its maximum allowed size.
  888          */
  889         addr = round_page((vm_offset_t)imgp->proc->p_vmspace->vm_daddr +
  890             lim_max(imgp->proc, RLIMIT_DATA));
  891         PROC_UNLOCK(imgp->proc);
  892 
  893         imgp->entry_addr = entry;
  894 
  895         if (interp != NULL) {
  896                 int have_interp = FALSE;
  897                 VOP_UNLOCK(imgp->vp, 0);
  898                 if (brand_info->emul_path != NULL &&
  899                     brand_info->emul_path[0] != '\0') {
  900                         path = malloc(MAXPATHLEN, M_TEMP, M_WAITOK);
  901                         snprintf(path, MAXPATHLEN, "%s%s",
  902                             brand_info->emul_path, interp);
  903                         error = __elfN(load_file)(imgp->proc, path, &addr,
  904                             &imgp->entry_addr, sv->sv_pagesize);
  905                         free(path, M_TEMP);
  906                         if (error == 0)
  907                                 have_interp = TRUE;
  908                 }
  909                 if (!have_interp && newinterp != NULL) {
  910                         error = __elfN(load_file)(imgp->proc, newinterp, &addr,
  911                             &imgp->entry_addr, sv->sv_pagesize);
  912                         if (error == 0)
  913                                 have_interp = TRUE;
  914                 }
  915                 if (!have_interp) {
  916                         error = __elfN(load_file)(imgp->proc, interp, &addr,
  917                             &imgp->entry_addr, sv->sv_pagesize);
  918                 }
  919                 vn_lock(imgp->vp, LK_EXCLUSIVE | LK_RETRY);
  920                 if (error != 0) {
  921                         uprintf("ELF interpreter %s not found\n", interp);
  922                         return (error);
  923                 }
  924         } else
  925                 addr = et_dyn_addr;
  926 
  927         /*
  928          * Construct auxargs table (used by the fixup routine)
  929          */
  930         elf_auxargs = malloc(sizeof(Elf_Auxargs), M_TEMP, M_WAITOK);
  931         elf_auxargs->execfd = -1;
  932         elf_auxargs->phdr = proghdr;
  933         elf_auxargs->phent = hdr->e_phentsize;
  934         elf_auxargs->phnum = hdr->e_phnum;
  935         elf_auxargs->pagesz = PAGE_SIZE;
  936         elf_auxargs->base = addr;
  937         elf_auxargs->flags = 0;
  938         elf_auxargs->entry = entry;
  939 
  940         imgp->auxargs = elf_auxargs;
  941         imgp->interpreted = 0;
  942         imgp->proc->p_osrel = osrel;
  943 
  944         return (error);
  945 }
  946 
/*
 * suword() stores one machine word in user space; select the variant
 * matching the ELF word size being built (suword32 or suword64).
 */
#define suword __CONCAT(suword, __ELF_WORD_SIZE)

/*
 * Finish setting up the user stack for a freshly exec'ed FreeBSD ELF
 * image: append the ELF auxiliary vector (built by the image activator
 * into imgp->auxargs) after the argv/envp pointer arrays, then push
 * argc below the argument vectors.  *stack_base points at argv[] on
 * entry and is moved down by one word on return.
 *
 * Returns 0 (always succeeds).
 */
int
__elfN(freebsd_fixup)(register_t **stack_base, struct image_params *imgp)
{
	Elf_Auxargs *args = (Elf_Auxargs *)imgp->auxargs;
	Elf_Addr *base;
	Elf_Addr *pos;

	base = (Elf_Addr *)*stack_base;
	/*
	 * Skip past argv[] (argc entries), envp[] (envc entries) and the
	 * two NULL terminators; the aux vector starts right after them.
	 */
	pos = base + (imgp->args->argc + imgp->args->envc + 2);

	if (args->execfd != -1)
		AUXARGS_ENTRY(pos, AT_EXECFD, args->execfd);
	AUXARGS_ENTRY(pos, AT_PHDR, args->phdr);
	AUXARGS_ENTRY(pos, AT_PHENT, args->phent);
	AUXARGS_ENTRY(pos, AT_PHNUM, args->phnum);
	AUXARGS_ENTRY(pos, AT_PAGESZ, args->pagesz);
	AUXARGS_ENTRY(pos, AT_FLAGS, args->flags);
	AUXARGS_ENTRY(pos, AT_ENTRY, args->entry);
	AUXARGS_ENTRY(pos, AT_BASE, args->base);
	if (imgp->execpathp != 0)
		AUXARGS_ENTRY(pos, AT_EXECPATH, imgp->execpathp);
	AUXARGS_ENTRY(pos, AT_NULL, 0);	/* aux vector terminator */

	/* The aux arguments have been consumed; release them. */
	free(imgp->auxargs, M_TEMP);
	imgp->auxargs = NULL;

	/* Push argc onto the user stack, just below the argv pointers. */
	base--;
	suword(base, (long)imgp->args->argc);
	*stack_base = (register_t *)base;
	return (0);
}
  980 
  981 /*
  982  * Code for generating ELF core dumps.
  983  */
  984 
/*
 * Per-segment callback invoked by each_writable_segment() for every
 * vm_map entry selected for inclusion in the core dump.
 */
typedef void (*segment_callback)(vm_map_entry_t, void *);

/* Closure for cb_put_phdr(). */
struct phdr_closure {
	Elf_Phdr *phdr;		/* Program header to fill in */
	Elf_Off offset;		/* Offset of segment in core file */
};

/* Closure for cb_size_segment(). */
struct sseg_closure {
	int count;		/* Count of writable segments. */
	size_t size;		/* Total size of all writable segments. */
};

/* Forward declarations for the core dump helpers below. */
static void cb_put_phdr(vm_map_entry_t, void *);
static void cb_size_segment(vm_map_entry_t, void *);
static void each_writable_segment(struct thread *, segment_callback, void *);
static int __elfN(corehdr)(struct thread *, struct vnode *, struct ucred *,
    int, void *, size_t);
static void __elfN(puthdr)(struct thread *, void *, size_t *, int);
static void __elfN(putnote)(void *, size_t *, const char *, int,
    const void *, size_t);
 1007 
 1008 int
 1009 __elfN(coredump)(td, vp, limit)
 1010         struct thread *td;
 1011         struct vnode *vp;
 1012         off_t limit;
 1013 {
 1014         struct ucred *cred = td->td_ucred;
 1015         int error = 0;
 1016         struct sseg_closure seginfo;
 1017         void *hdr;
 1018         size_t hdrsize;
 1019 
 1020         /* Size the program segments. */
 1021         seginfo.count = 0;
 1022         seginfo.size = 0;
 1023         each_writable_segment(td, cb_size_segment, &seginfo);
 1024 
 1025         /*
 1026          * Calculate the size of the core file header area by making
 1027          * a dry run of generating it.  Nothing is written, but the
 1028          * size is calculated.
 1029          */
 1030         hdrsize = 0;
 1031         __elfN(puthdr)(td, (void *)NULL, &hdrsize, seginfo.count);
 1032 
 1033         if (hdrsize + seginfo.size >= limit)
 1034                 return (EFAULT);
 1035 
 1036         /*
 1037          * Allocate memory for building the header, fill it up,
 1038          * and write it out.
 1039          */
 1040         hdr = malloc(hdrsize, M_TEMP, M_WAITOK);
 1041         if (hdr == NULL) {
 1042                 return (EINVAL);
 1043         }
 1044         error = __elfN(corehdr)(td, vp, cred, seginfo.count, hdr, hdrsize);
 1045 
 1046         /* Write the contents of all of the writable segments. */
 1047         if (error == 0) {
 1048                 Elf_Phdr *php;
 1049                 off_t offset;
 1050                 int i;
 1051 
 1052                 php = (Elf_Phdr *)((char *)hdr + sizeof(Elf_Ehdr)) + 1;
 1053                 offset = hdrsize;
 1054                 for (i = 0; i < seginfo.count; i++) {
 1055                         error = vn_rdwr_inchunks(UIO_WRITE, vp,
 1056                             (caddr_t)(uintptr_t)php->p_vaddr,
 1057                             php->p_filesz, offset, UIO_USERSPACE,
 1058                             IO_UNIT | IO_DIRECT, cred, NOCRED, NULL,
 1059                             curthread);
 1060                         if (error != 0)
 1061                                 break;
 1062                         offset += php->p_filesz;
 1063                         php++;
 1064                 }
 1065         }
 1066         free(hdr, M_TEMP);
 1067 
 1068         return (error);
 1069 }
 1070 
 1071 /*
 1072  * A callback for each_writable_segment() to write out the segment's
 1073  * program header entry.
 1074  */
 1075 static void
 1076 cb_put_phdr(entry, closure)
 1077         vm_map_entry_t entry;
 1078         void *closure;
 1079 {
 1080         struct phdr_closure *phc = (struct phdr_closure *)closure;
 1081         Elf_Phdr *phdr = phc->phdr;
 1082 
 1083         phc->offset = round_page(phc->offset);
 1084 
 1085         phdr->p_type = PT_LOAD;
 1086         phdr->p_offset = phc->offset;
 1087         phdr->p_vaddr = entry->start;
 1088         phdr->p_paddr = 0;
 1089         phdr->p_filesz = phdr->p_memsz = entry->end - entry->start;
 1090         phdr->p_align = PAGE_SIZE;
 1091         phdr->p_flags = __elfN(untrans_prot)(entry->protection);
 1092 
 1093         phc->offset += phdr->p_filesz;
 1094         phc->phdr++;
 1095 }
 1096 
 1097 /*
 1098  * A callback for each_writable_segment() to gather information about
 1099  * the number of segments and their total size.
 1100  */
 1101 static void
 1102 cb_size_segment(entry, closure)
 1103         vm_map_entry_t entry;
 1104         void *closure;
 1105 {
 1106         struct sseg_closure *ssc = (struct sseg_closure *)closure;
 1107 
 1108         ssc->count++;
 1109         ssc->size += entry->end - entry->start;
 1110 }
 1111 
 1112 /*
 1113  * For each writable segment in the process's memory map, call the given
 1114  * function with a pointer to the map entry and some arbitrary
 1115  * caller-supplied data.
 1116  */
 1117 static void
 1118 each_writable_segment(td, func, closure)
 1119         struct thread *td;
 1120         segment_callback func;
 1121         void *closure;
 1122 {
 1123         struct proc *p = td->td_proc;
 1124         vm_map_t map = &p->p_vmspace->vm_map;
 1125         vm_map_entry_t entry;
 1126         vm_object_t backing_object, object;
 1127         boolean_t ignore_entry;
 1128 
 1129         vm_map_lock_read(map);
 1130         for (entry = map->header.next; entry != &map->header;
 1131             entry = entry->next) {
 1132                 /*
 1133                  * Don't dump inaccessible mappings, deal with legacy
 1134                  * coredump mode.
 1135                  *
 1136                  * Note that read-only segments related to the elf binary
 1137                  * are marked MAP_ENTRY_NOCOREDUMP now so we no longer
 1138                  * need to arbitrarily ignore such segments.
 1139                  */
 1140                 if (elf_legacy_coredump) {
 1141                         if ((entry->protection & VM_PROT_RW) != VM_PROT_RW)
 1142                                 continue;
 1143                 } else {
 1144                         if ((entry->protection & VM_PROT_ALL) == 0)
 1145                                 continue;
 1146                 }
 1147 
 1148                 /*
 1149                  * Dont include memory segment in the coredump if
 1150                  * MAP_NOCORE is set in mmap(2) or MADV_NOCORE in
 1151                  * madvise(2).  Do not dump submaps (i.e. parts of the
 1152                  * kernel map).
 1153                  */
 1154                 if (entry->eflags & (MAP_ENTRY_NOCOREDUMP|MAP_ENTRY_IS_SUB_MAP))
 1155                         continue;
 1156 
 1157                 if ((object = entry->object.vm_object) == NULL)
 1158                         continue;
 1159 
 1160                 /* Ignore memory-mapped devices and such things. */
 1161                 VM_OBJECT_LOCK(object);
 1162                 while ((backing_object = object->backing_object) != NULL) {
 1163                         VM_OBJECT_LOCK(backing_object);
 1164                         VM_OBJECT_UNLOCK(object);
 1165                         object = backing_object;
 1166                 }
 1167                 ignore_entry = object->type != OBJT_DEFAULT &&
 1168                     object->type != OBJT_SWAP && object->type != OBJT_VNODE;
 1169                 VM_OBJECT_UNLOCK(object);
 1170                 if (ignore_entry)
 1171                         continue;
 1172 
 1173                 (*func)(entry, closure);
 1174         }
 1175         vm_map_unlock_read(map);
 1176 }
 1177 
 1178 /*
 1179  * Write the core file header to the file, including padding up to
 1180  * the page boundary.
 1181  */
 1182 static int
 1183 __elfN(corehdr)(td, vp, cred, numsegs, hdr, hdrsize)
 1184         struct thread *td;
 1185         struct vnode *vp;
 1186         struct ucred *cred;
 1187         int numsegs;
 1188         size_t hdrsize;
 1189         void *hdr;
 1190 {
 1191         size_t off;
 1192 
 1193         /* Fill in the header. */
 1194         bzero(hdr, hdrsize);
 1195         off = 0;
 1196         __elfN(puthdr)(td, hdr, &off, numsegs);
 1197 
 1198         /* Write it to the core file. */
 1199         return (vn_rdwr_inchunks(UIO_WRITE, vp, hdr, hdrsize, (off_t)0,
 1200             UIO_SYSSPACE, IO_UNIT | IO_DIRECT, cred, NOCRED, NULL,
 1201             td));
 1202 }
 1203 
/*
 * Map the generic elf_* process-state types used by the core dump code
 * to either the 32-bit compat layouts (when building the 32-bit image
 * activator on a 64-bit kernel) or the native layouts.
 */
#if defined(COMPAT_FREEBSD32) && __ELF_WORD_SIZE == 32
#include <compat/freebsd32/freebsd32.h>

typedef struct prstatus32 elf_prstatus_t;
typedef struct prpsinfo32 elf_prpsinfo_t;
typedef struct fpreg32 elf_prfpregset_t;
typedef struct fpreg32 elf_fpregset_t;
typedef struct reg32 elf_gregset_t;
typedef struct thrmisc32 elf_thrmisc_t;
#else
typedef prstatus_t elf_prstatus_t;
typedef prpsinfo_t elf_prpsinfo_t;
typedef prfpregset_t elf_prfpregset_t;
typedef prfpregset_t elf_fpregset_t;
typedef gregset_t elf_gregset_t;
typedef thrmisc_t elf_thrmisc_t;
#endif
 1221 
/*
 * Generate the core file header area: the ELF header, the program
 * header table (one PT_NOTE plus numsegs PT_LOAD entries) and the
 * note data.  Runs in one of two modes: when dst is NULL only *off is
 * advanced, a dry run used to size the header; when dst is non-NULL
 * the data is written into dst at the offsets tracked through *off.
 * Both passes must advance *off identically or the file layout
 * recorded in the headers will not match the data actually written.
 */
static void
__elfN(puthdr)(struct thread *td, void *dst, size_t *off, int numsegs)
{
	/* Scratch buffer for one thread's worth of note payloads. */
	struct {
		elf_prstatus_t status;
		elf_prfpregset_t fpregset;
		elf_prpsinfo_t psinfo;
		elf_thrmisc_t thrmisc;
	} *tempdata;
	elf_prstatus_t *status;
	elf_prfpregset_t *fpregset;
	elf_prpsinfo_t *psinfo;
	elf_thrmisc_t *thrmisc;
	struct proc *p;
	struct thread *thr;
	size_t ehoff, noteoff, notesz, phoff;

	p = td->td_proc;

	/* Reserve space for the ELF header and the program headers. */
	ehoff = *off;
	*off += sizeof(Elf_Ehdr);

	phoff = *off;
	*off += (numsegs + 1) * sizeof(Elf_Phdr);

	noteoff = *off;
	/*
	 * Don't allocate space for the notes if we're just calculating
	 * the size of the header. We also don't collect the data.
	 */
	if (dst != NULL) {
		tempdata = malloc(sizeof(*tempdata), M_TEMP, M_ZERO|M_WAITOK);
		status = &tempdata->status;
		fpregset = &tempdata->fpregset;
		psinfo = &tempdata->psinfo;
		thrmisc = &tempdata->thrmisc;
	} else {
		tempdata = NULL;
		status = NULL;
		fpregset = NULL;
		psinfo = NULL;
		thrmisc = NULL;
	}

	/* Process-wide NT_PRPSINFO note (command name / args). */
	if (dst != NULL) {
		psinfo->pr_version = PRPSINFO_VERSION;
		psinfo->pr_psinfosz = sizeof(elf_prpsinfo_t);
		strlcpy(psinfo->pr_fname, p->p_comm, sizeof(psinfo->pr_fname));
		/*
		 * XXX - We don't fill in the command line arguments properly
		 * yet.
		 */
		strlcpy(psinfo->pr_psargs, p->p_comm,
		    sizeof(psinfo->pr_psargs));
	}
	__elfN(putnote)(dst, off, "FreeBSD", NT_PRPSINFO, psinfo,
	    sizeof *psinfo);

	/*
	 * To have the debugger select the right thread (LWP) as the initial
	 * thread, we dump the state of the thread passed to us in td first.
	 * This is the thread that causes the core dump and thus likely to
	 * be the right thread one wants to have selected in the debugger.
	 */
	thr = td;
	while (thr != NULL) {
		if (dst != NULL) {
			status->pr_version = PRSTATUS_VERSION;
			status->pr_statussz = sizeof(elf_prstatus_t);
			status->pr_gregsetsz = sizeof(elf_gregset_t);
			status->pr_fpregsetsz = sizeof(elf_fpregset_t);
			status->pr_osreldate = osreldate;
			status->pr_cursig = p->p_sig;
			status->pr_pid = thr->td_tid;
#if defined(COMPAT_FREEBSD32) && __ELF_WORD_SIZE == 32
			fill_regs32(thr, &status->pr_reg);
			fill_fpregs32(thr, fpregset);
#else
			fill_regs(thr, &status->pr_reg);
			fill_fpregs(thr, fpregset);
#endif
			memset(&thrmisc->_pad, 0, sizeof (thrmisc->_pad));
			strcpy(thrmisc->pr_tname, thr->td_name);
		}
		/* Per-thread notes: status, FP registers, misc info. */
		__elfN(putnote)(dst, off, "FreeBSD", NT_PRSTATUS, status,
		    sizeof *status);
		__elfN(putnote)(dst, off, "FreeBSD", NT_FPREGSET, fpregset,
		    sizeof *fpregset);
		__elfN(putnote)(dst, off, "FreeBSD", NT_THRMISC, thrmisc,
		    sizeof *thrmisc);
		/*
		 * Allow for MD specific notes, as well as any MD
		 * specific preparations for writing MI notes.
		 */
		__elfN(dump_thread)(thr, dst, off);

		/*
		 * Iterate td first, then the rest of p_threads,
		 * skipping td when it comes up again in the list.
		 */
		thr = (thr == td) ? TAILQ_FIRST(&p->p_threads) :
		    TAILQ_NEXT(thr, td_plist);
		if (thr == td)
			thr = TAILQ_NEXT(thr, td_plist);
	}

	notesz = *off - noteoff;

	if (dst != NULL)
		free(tempdata, M_TEMP);

	/* Align up to a page boundary for the program segments. */
	*off = round_page(*off);

	if (dst != NULL) {
		Elf_Ehdr *ehdr;
		Elf_Phdr *phdr;
		struct phdr_closure phc;

		/*
		 * Fill in the ELF header.
		 */
		ehdr = (Elf_Ehdr *)((char *)dst + ehoff);
		ehdr->e_ident[EI_MAG0] = ELFMAG0;
		ehdr->e_ident[EI_MAG1] = ELFMAG1;
		ehdr->e_ident[EI_MAG2] = ELFMAG2;
		ehdr->e_ident[EI_MAG3] = ELFMAG3;
		ehdr->e_ident[EI_CLASS] = ELF_CLASS;
		ehdr->e_ident[EI_DATA] = ELF_DATA;
		ehdr->e_ident[EI_VERSION] = EV_CURRENT;
		ehdr->e_ident[EI_OSABI] = ELFOSABI_FREEBSD;
		ehdr->e_ident[EI_ABIVERSION] = 0;
		ehdr->e_ident[EI_PAD] = 0;
		ehdr->e_type = ET_CORE;
#if defined(COMPAT_FREEBSD32) && __ELF_WORD_SIZE == 32
		ehdr->e_machine = ELF_ARCH32;
#else
		ehdr->e_machine = ELF_ARCH;
#endif
		ehdr->e_version = EV_CURRENT;
		ehdr->e_entry = 0;
		ehdr->e_phoff = phoff;
		ehdr->e_flags = 0;
		ehdr->e_ehsize = sizeof(Elf_Ehdr);
		ehdr->e_phentsize = sizeof(Elf_Phdr);
		ehdr->e_phnum = numsegs + 1;	/* PT_NOTE + PT_LOADs */
		ehdr->e_shentsize = sizeof(Elf_Shdr);
		ehdr->e_shnum = 0;		/* no section headers */
		ehdr->e_shstrndx = SHN_UNDEF;

		/*
		 * Fill in the program header entries.
		 */
		phdr = (Elf_Phdr *)((char *)dst + phoff);

		/* The note segment. */
		phdr->p_type = PT_NOTE;
		phdr->p_offset = noteoff;
		phdr->p_vaddr = 0;
		phdr->p_paddr = 0;
		phdr->p_filesz = notesz;
		phdr->p_memsz = 0;
		phdr->p_flags = 0;
		phdr->p_align = 0;
		phdr++;

		/* All the writable segments from the program. */
		phc.phdr = phdr;
		phc.offset = *off;
		each_writable_segment(td, cb_put_phdr, &phc);
	}
}
 1390 
 1391 static void
 1392 __elfN(putnote)(void *dst, size_t *off, const char *name, int type,
 1393     const void *desc, size_t descsz)
 1394 {
 1395         Elf_Note note;
 1396 
 1397         note.n_namesz = strlen(name) + 1;
 1398         note.n_descsz = descsz;
 1399         note.n_type = type;
 1400         if (dst != NULL)
 1401                 bcopy(&note, (char *)dst + *off, sizeof note);
 1402         *off += sizeof note;
 1403         if (dst != NULL)
 1404                 bcopy(name, (char *)dst + *off, note.n_namesz);
 1405         *off += roundup2(note.n_namesz, sizeof(Elf_Size));
 1406         if (dst != NULL)
 1407                 bcopy(desc, (char *)dst + *off, note.n_descsz);
 1408         *off += roundup2(note.n_descsz, sizeof(Elf_Size));
 1409 }
 1410 
/*
 * Scan the PT_NOTE segment described by pnote for a note matching
 * checknote (same namesz/descsz/type and vendor name).  On a match,
 * optionally translate the note payload into an osreldate via the
 * brandnote's trans_osrel hook.  Returns TRUE on a successful match.
 *
 * The note data is read from the first page of the image only;
 * pnote's offset/size are validated against PAGE_SIZE up front.
 * This parses untrusted executable content, so all bounds checks
 * below are load-bearing.
 */
static boolean_t
__elfN(parse_notes)(struct image_params *imgp, Elf_Brandnote *checknote,
    int32_t *osrel, const Elf_Phdr *pnote)
{
	const Elf_Note *note, *note0, *note_end;
	const char *note_name;
	int i;

	/* Reject notes that do not fit entirely within the first page. */
	if (pnote == NULL || pnote->p_offset > PAGE_SIZE ||
	    pnote->p_filesz > PAGE_SIZE - pnote->p_offset)
		return (FALSE);

	note = note0 = (const Elf_Note *)(imgp->image_header + pnote->p_offset);
	note_end = (const Elf_Note *)(imgp->image_header +
	    pnote->p_offset + pnote->p_filesz);
	/* The cap of 100 notes bounds the walk on malformed input. */
	for (i = 0; i < 100 && note >= note0 && note < note_end; i++) {
		/*
		 * NOTE(review): the pointer difference below is signed
		 * (ptrdiff_t) compared against an unsigned sizeof; the
		 * loop condition keeps note < note_end so the difference
		 * is non-negative here, but an explicit cast would be
		 * clearer — confirm against later FreeBSD revisions.
		 */
		if (!aligned(note, Elf32_Addr) || (const char *)note_end -
		    (const char *)note < sizeof(Elf_Note))
			return (FALSE);
		if (note->n_namesz != checknote->hdr.n_namesz ||
		    note->n_descsz != checknote->hdr.n_descsz ||
		    note->n_type != checknote->hdr.n_type)
			goto nextnote;
		/* The name string immediately follows the note header. */
		note_name = (const char *)(note + 1);
		if (note_name + checknote->hdr.n_namesz >=
		    (const char *)note_end || strncmp(checknote->vendor,
		    note_name, checknote->hdr.n_namesz) != 0)
			goto nextnote;

		/*
		 * Fetch the osreldate for binary
		 * from the ELF OSABI-note if necessary.
		 */
		if ((checknote->flags & BN_TRANSLATE_OSREL) != 0 &&
		    checknote->trans_osrel != NULL)
			return (checknote->trans_osrel(note, osrel));
		return (TRUE);

nextnote:
		/* Skip header, padded name and padded descriptor. */
		note = (const Elf_Note *)((const char *)(note + 1) +
		    roundup2(note->n_namesz, sizeof(Elf32_Addr)) +
		    roundup2(note->n_descsz, sizeof(Elf32_Addr)));
	}

	return (FALSE);
}
 1457 
 1458 /*
 1459  * Try to find the appropriate ABI-note section for checknote,
 1460  * fetch the osreldate for binary from the ELF OSABI-note. Only the
 1461  * first page of the image is searched, the same as for headers.
 1462  */
 1463 static boolean_t
 1464 __elfN(check_note)(struct image_params *imgp, Elf_Brandnote *checknote,
 1465     int32_t *osrel)
 1466 {
 1467         const Elf_Phdr *phdr;
 1468         const Elf_Ehdr *hdr;
 1469         int i;
 1470 
 1471         hdr = (const Elf_Ehdr *)imgp->image_header;
 1472         phdr = (const Elf_Phdr *)(imgp->image_header + hdr->e_phoff);
 1473 
 1474         for (i = 0; i < hdr->e_phnum; i++) {
 1475                 if (phdr[i].p_type == PT_NOTE &&
 1476                     __elfN(parse_notes)(imgp, checknote, osrel, &phdr[i]))
 1477                         return (TRUE);
 1478         }
 1479         return (FALSE);
 1480 
 1481 }
 1482 
/*
 * Tell kern_execve.c about it, with a little help from the linker.
 * Registers this image activator (exec_elf32/exec_elf64) with the
 * execsw table via the EXEC_SET linker set.
 */
static struct execsw __elfN(execsw) = {
	__CONCAT(exec_, __elfN(imgact)),
	__XSTRING(__CONCAT(ELF, __ELF_WORD_SIZE))
};
EXEC_SET(__CONCAT(elf, __ELF_WORD_SIZE), __elfN(execsw));
 1491 
 1492 static vm_prot_t
 1493 __elfN(trans_prot)(Elf_Word flags)
 1494 {
 1495         vm_prot_t prot;
 1496 
 1497         prot = 0;
 1498         if (flags & PF_X)
 1499                 prot |= VM_PROT_EXECUTE;
 1500         if (flags & PF_W)
 1501                 prot |= VM_PROT_WRITE;
 1502         if (flags & PF_R)
 1503                 prot |= VM_PROT_READ;
 1504 #if __ELF_WORD_SIZE == 32
 1505 #if defined(__amd64__) || defined(__ia64__)
 1506         if (i386_read_exec && (flags & PF_R))
 1507                 prot |= VM_PROT_EXECUTE;
 1508 #endif
 1509 #endif
 1510         return (prot);
 1511 }
 1512 
 1513 static Elf_Word
 1514 __elfN(untrans_prot)(vm_prot_t prot)
 1515 {
 1516         Elf_Word flags;
 1517 
 1518         flags = 0;
 1519         if (prot & VM_PROT_EXECUTE)
 1520                 flags |= PF_X;
 1521         if (prot & VM_PROT_READ)
 1522                 flags |= PF_R;
 1523         if (prot & VM_PROT_WRITE)
 1524                 flags |= PF_W;
 1525         return (flags);
 1526 }

Cache object: 259226a9c932e474b19b1421addb58ca


[ source navigation ] [ diff markup ] [ identifier search ] [ freetext search ] [ file search ] [ list types ] [ track identifier ]


This page is part of the FreeBSD/Linux Linux Kernel Cross-Reference, and was automatically generated using a modified version of the LXR engine.