FreeBSD/Linux Kernel Cross Reference
sys/bsd/kern/mach_loader.c


/*
 * Copyright (c) 2000-2002 Apple Computer, Inc. All rights reserved.
 *
 * @APPLE_LICENSE_HEADER_START@
 *
 * Copyright (c) 1999-2003 Apple Computer, Inc.  All Rights Reserved.
 *
 * This file contains Original Code and/or Modifications of Original Code
 * as defined in and that are subject to the Apple Public Source License
 * Version 2.0 (the 'License'). You may not use this file except in
 * compliance with the License. Please obtain a copy of the License at
 * http://www.opensource.apple.com/apsl/ and read it before using this
 * file.
 *
 * The Original Code and all software distributed under the License are
 * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
 * Please see the License for the specific language governing rights and
 * limitations under the License.
 *
 * @APPLE_LICENSE_HEADER_END@
 */
/*
 *      Copyright (C) 1988, 1989,  NeXT, Inc.
 *
 *      File:   kern/mach_loader.c
 *      Author: Avadis Tevanian, Jr.
 *
 *      Mach object file loader (kernel version, for now).
 *
 * 21-Jul-88  Avadis Tevanian, Jr. (avie) at NeXT
 *      Started.
 */
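
/*
 * Overview: load_machfile() is the entry point, called at exec time.
 * It builds (or reuses) a VM map for the new image and hands off to
 * parse_machfile(), which walks the Mach-O load commands and
 * dispatches to load_segment(), load_unixthread(), load_thread() and
 * load_dylinker() as appropriate.
 */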
#include <sys/param.h>
#include <sys/vnode.h>
#include <sys/uio.h>
#include <sys/namei.h>
#include <sys/proc.h>
#include <sys/stat.h>
#include <sys/malloc.h>
#include <sys/mount.h>
#include <sys/fcntl.h>
#include <sys/ubc.h>

#include <mach/mach_types.h>

#include <kern/mach_loader.h>
#include <kern/task.h>

#include <mach-o/fat.h>
#include <mach-o/loader.h>

#include <kern/cpu_number.h>

#include <vm/vm_map.h>
#include <vm/vm_kern.h>
#include <vm/vm_pager.h>
#include <vm/vnode_pager.h>
#include <mach/vm_statistics.h>

#include <mach/shared_memory_server.h>
#include <vm/vm_shared_memory_server.h>

#include <machine/vmparam.h>

/*
 * Prototypes of static functions.
 */
static
load_return_t
parse_machfile(
        struct vnode            *vp,
        vm_map_t                map,
        thread_act_t            thr_act,
        struct mach_header      *header,
        unsigned long           file_offset,
        unsigned long           macho_size,
        int                     depth,
        load_result_t           *result,
        boolean_t               clean_regions
),
load_segment(
        struct segment_command  *scp,
        void                    *pager,
        unsigned long           pager_offset,
        unsigned long           macho_size,
        unsigned long           end_of_file,
        vm_map_t                map,
        load_result_t           *result
),
load_unixthread(
        struct thread_command   *tcp,
        thread_act_t            thr_act,
        load_result_t           *result
),
load_thread(
        struct thread_command   *tcp,
        thread_act_t            thr_act,
        load_result_t           *result
),
load_threadstate(
        thread_t                thread,
        unsigned long           *ts,
        unsigned long           total_size
),
load_threadstack(
        thread_t                thread,
        unsigned long           *ts,
        unsigned long           total_size,
        vm_offset_t             *user_stack,
        int                     *customstack
),
load_threadentry(
        thread_t                thread,
        unsigned long           *ts,
        unsigned long           total_size,
        vm_offset_t             *entry_point
),
load_dylinker(
        struct dylinker_command *lcp,
        vm_map_t                map,
        thread_act_t            thr_act,
        int                     depth,
        load_result_t           *result,
        boolean_t               clean_regions
),
get_macho_vnode(
        char                    *path,
        struct mach_header      *mach_header,
        unsigned long           *file_offset,
        unsigned long           *macho_size,
        struct vnode            **vpp
);

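/*
 * load_machfile(): load a Mach-O image from vnode vp into a task's
 * address space.  If new_map is VM_MAP_NULL, a fresh map (and, except
 * on i386, a fresh pmap) is created and, on success, swapped in as the
 * current task's map; otherwise the caller-supplied map is filled in
 * and the caller keeps ownership of it.
 */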
load_return_t
load_machfile(
        struct vnode            *vp,
        struct mach_header      *header,
        unsigned long           file_offset,
        unsigned long           macho_size,
        load_result_t           *result,
        thread_act_t            thr_act,
        vm_map_t                new_map,
        boolean_t               clean_regions
)
{
        pmap_t                  pmap;
        vm_map_t                map;
        vm_map_t                old_map;
        load_result_t           myresult;
        kern_return_t           kret;
        load_return_t           lret;
        boolean_t               create_map = TRUE;
#ifndef i386
        extern pmap_t pmap_create(vm_size_t size);      /* XXX */
#endif

        if (new_map != VM_MAP_NULL) {
                create_map = FALSE;
        }

        if (create_map) {
                old_map = current_map();
#ifdef i386
                pmap = get_task_pmap(current_task());
                pmap_reference(pmap);
#else
                pmap = pmap_create((vm_size_t) 0);
#endif
                map = vm_map_create(pmap,
                                get_map_min(old_map),
                                get_map_max(old_map),
                                TRUE); /**** FIXME ****/
        } else
                map = new_map;

        if (!result)
                result = &myresult;

        *result = (load_result_t) { 0 };

        lret = parse_machfile(vp, map, thr_act, header, file_offset, macho_size,
                             0, result, clean_regions);

        if (lret != LOAD_SUCCESS) {
                if (create_map) {
                        vm_map_deallocate(map); /* will lose pmap reference too */
                }
                return(lret);
        }

        /*
         *      Commit to the new map.  First make sure that the current
         *      users of the task get done with it, and that we clean
         *      up the old contents of IPC and memory.  The task is
         *      guaranteed to be single threaded upon return (us).
         *
         *      Swapping the new map for the old consumes our new map
         *      reference but leaves us responsible for the old_map
         *      reference.  That lets us get off the pmap associated
         *      with it, and then we can release it.
         */
        if (create_map) {
                task_halt(current_task());

                old_map = swap_task_map(current_task(), map);
#ifndef i386
                pmap_switch(pmap);      /* Make sure we are using the new pmap */
#endif
                vm_map_deallocate(old_map);
        }
        return(LOAD_SUCCESS);
}

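/*
 * When non-zero (the default), parse_machfile() premaps the split
 * library shared region into any task that reaches depth == 1; see
 * the end of parse_machfile() below.
 */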
int     dylink_test = 1;

static
load_return_t
parse_machfile(
        struct vnode            *vp,
        vm_map_t                map,
        thread_act_t            thr_act,
        struct mach_header      *header,
        unsigned long           file_offset,
        unsigned long           macho_size,
        int                     depth,
        load_result_t           *result,
        boolean_t               clean_regions
)
{
        struct machine_slot     *ms;
        int                     ncmds;
        struct load_command     *lcp, *next;
        struct dylinker_command *dlp = 0;
        void                    *pager;
        load_return_t           ret = LOAD_SUCCESS;
        vm_offset_t             addr, kl_addr;
        vm_size_t               size, kl_size;
        int                     offset;
        int                     pass;
        struct proc             *p = current_proc();    /* XXXX */
        int                     error;
        int                     resid = 0;
        task_t                  task;

        /*
         *      Break infinite recursion.
         */
        if (depth > 6)
                return(LOAD_FAILURE);

        task = (task_t)get_threadtask(thr_act);

        depth++;

        /*
         *      Check that this is an image for the right machine type.
         */
        ms = &machine_slot[cpu_number()];
        if ((header->cputype != ms->cpu_type) ||
            !check_cpu_subtype(header->cpusubtype))
                return(LOAD_BADARCH);

        switch (header->filetype) {

        case MH_OBJECT:
        case MH_EXECUTE:
        case MH_PRELOAD:
                if (depth != 1)
                        return (LOAD_FAILURE);
                break;

        case MH_FVMLIB:
        case MH_DYLIB:
                if (depth == 1)
                        return (LOAD_FAILURE);
                break;

        case MH_DYLINKER:
                if (depth != 2)
                        return (LOAD_FAILURE);
                break;

        default:
                return (LOAD_FAILURE);
        }
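
        /*
         *      At this point depth == 1 for the image exec is loading
         *      directly and depth == 2 for an MH_DYLINKER loaded on its
         *      behalf by load_dylinker(), so the checks above restrict
         *      executables to the top level and the dynamic linker to
         *      exactly one level down.
         */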

        /*
         *      Get the pager for the file.
         */
        UBCINFOCHECK("parse_machfile", vp);
        pager = (void *) ubc_getpager(vp);

        /*
         *      Map the portion that must be accessible directly into
         *      the kernel's map.
         */
        if ((sizeof (struct mach_header) + header->sizeofcmds) > macho_size)
                return(LOAD_BADMACHO);

        /*
         *      Round the size of the Mach-O commands up to a page boundary.
         */
        size = round_page_32(sizeof (struct mach_header) + header->sizeofcmds);
        if (size <= 0)
                return(LOAD_BADMACHO);

        /*
         * Map the load commands into kernel memory.
         */
        addr = 0;
        kl_size = size;
        kl_addr = kalloc(size);
        addr = kl_addr;
        if (addr == NULL)
                return(LOAD_NOSPACE);

        if (error = vn_rdwr(UIO_READ, vp, (caddr_t)addr, size, file_offset,
            UIO_SYSSPACE, 0, p->p_ucred, &resid, p)) {
                if (kl_addr)
                        kfree(kl_addr, kl_size);
                return(LOAD_IOERROR);
        }
        /* ubc_map(vp); */ /* NOT HERE */

        /*
         *      Scan through the commands, processing each one as necessary.
         */
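        /*
         *      Two passes: segments are mapped on pass 1, and thread
         *      state, the initial stack and the dynamic linker are
         *      handled on pass 2, so that register and stack setup
         *      never reference memory that has not been mapped yet.
         */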
        for (pass = 1; pass <= 2; pass++) {
                offset = sizeof(struct mach_header);
                ncmds = header->ncmds;
                while (ncmds--) {
                        /*
                         *      Get a pointer to the command.
                         */
                        lcp = (struct load_command *)(addr + offset);
                        offset += lcp->cmdsize;

                        /*
                         *      Check for a valid lcp pointer by checking
                         *      the next offset.
                         */
                        if (offset > header->sizeofcmds
                                        + sizeof(struct mach_header)) {
                                if (kl_addr)
                                        kfree(kl_addr, kl_size);
                                return(LOAD_BADMACHO);
                        }

                        /*
                         *      Check for a valid command.
                         */
                        switch(lcp->cmd) {
                        case LC_SEGMENT:
                                if (pass != 1)
                                        break;
                                ret = load_segment(
                                        (struct segment_command *) lcp,
                                        pager, file_offset,
                                        macho_size,
                                        (unsigned long)ubc_getsize(vp),
                                        map,
                                        result);
                                break;
                        case LC_THREAD:
                                if (pass != 2)
                                        break;
                                ret = load_thread((struct thread_command *)lcp, thr_act,
                                                  result);
                                break;
                        case LC_UNIXTHREAD:
                                if (pass != 2)
                                        break;
                                ret = load_unixthread(
                                                 (struct thread_command *) lcp, thr_act,
                                                 result);
                                break;
                        case LC_LOAD_DYLINKER:
                                if (pass != 2)
                                        break;
                                if ((depth == 1) && (dlp == 0))
                                        dlp = (struct dylinker_command *)lcp;
                                else
                                        ret = LOAD_FAILURE;
                                break;
                        default:
                                ret = LOAD_SUCCESS;     /* ignore other stuff */
                        }
                        if (ret != LOAD_SUCCESS)
                                break;
                }
                if (ret != LOAD_SUCCESS)
                        break;
        }
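
        /*
         *      For the top-level image, attach the task to a system
         *      shared region (replacing a full or stale one if needed),
         *      premap the split-library text and data ranges, and then
         *      load the dynamic linker recorded above, if any.
         */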
        if ((ret == LOAD_SUCCESS) && (depth == 1)) {
                vm_offset_t addr;
                shared_region_mapping_t shared_region;
                struct shared_region_task_mappings map_info;
                shared_region_mapping_t next;

RedoLookup:
                vm_get_shared_region(task, &shared_region);
                map_info.self = (vm_offset_t)shared_region;
                shared_region_mapping_info(shared_region,
                        &(map_info.text_region),
                        &(map_info.text_size),
                        &(map_info.data_region),
                        &(map_info.data_size),
                        &(map_info.region_mappings),
                        &(map_info.client_base),
                        &(map_info.alternate_base),
                        &(map_info.alternate_next),
                        &(map_info.fs_base),
                        &(map_info.system),
                        &(map_info.flags), &next);

                if ((map_info.flags & SHARED_REGION_FULL) ||
                        (map_info.flags & SHARED_REGION_STALE)) {
                        shared_region_mapping_t system_region;
                        system_region = lookup_default_shared_region(
                                map_info.fs_base, map_info.system);
                        if ((map_info.self != (vm_offset_t)system_region) &&
                                (map_info.flags & SHARED_REGION_SYSTEM)) {
                           if (system_region == NULL) {
                                shared_file_boot_time_init(
                                        map_info.fs_base, map_info.system);
                           } else {
                                vm_set_shared_region(task, system_region);
                           }
                           shared_region_mapping_dealloc(
                                        (shared_region_mapping_t)map_info.self);
                           goto RedoLookup;
                        } else if (map_info.flags & SHARED_REGION_SYSTEM) {
                              shared_region_mapping_dealloc(system_region);
                              shared_file_boot_time_init(
                                        map_info.fs_base, map_info.system);
                              shared_region_mapping_dealloc(
                                     (shared_region_mapping_t)map_info.self);
                        } else {
                              shared_region_mapping_dealloc(system_region);
                        }
                }

                if (dylink_test) {
                        p->p_flag |= P_NOSHLIB; /* no shlibs in use */
                        addr = map_info.client_base;
                        if (clean_regions) {
                           vm_map(map, &addr, map_info.text_size,
                                0, SHARED_LIB_ALIAS,
                                map_info.text_region, 0, FALSE,
                                VM_PROT_READ, VM_PROT_READ, VM_INHERIT_SHARE);
                        } else {
                           vm_map(map, &addr, map_info.text_size, 0,
                                (VM_MEMORY_SHARED_PMAP << 24)
                                                | SHARED_LIB_ALIAS,
                                map_info.text_region, 0, FALSE,
                                VM_PROT_READ, VM_PROT_READ, VM_INHERIT_SHARE);
                        }
                        addr = map_info.client_base + map_info.text_size;
                        vm_map(map, &addr, map_info.data_size,
                                0, SHARED_LIB_ALIAS,
                                map_info.data_region, 0, TRUE,
                                VM_PROT_READ, VM_PROT_READ, VM_INHERIT_SHARE);

                        while (next) {
                           /*
                            * This should be fleshed out for the general
                            * case, but it is not necessary for now; the
                            * comm page is handled inside the shared_region
                            * mapping create calls for simplicity's sake.
                            * If more general support is needed, the code
                            * to manipulate the shared range chain can be
                            * pulled out and moved to the callers.
                            */
                           shared_region_mapping_info(next,
                                &(map_info.text_region),
                                &(map_info.text_size),
                                &(map_info.data_region),
                                &(map_info.data_size),
                                &(map_info.region_mappings),
                                &(map_info.client_base),
                                &(map_info.alternate_base),
                                &(map_info.alternate_next),
                                &(map_info.fs_base),
                                &(map_info.system),
                                &(map_info.flags), &next);

                           addr = map_info.client_base;
                           vm_map(map, &addr, map_info.text_size,
                                0, SHARED_LIB_ALIAS,
                                map_info.text_region, 0, FALSE,
                                VM_PROT_READ, VM_PROT_READ, VM_INHERIT_SHARE);
                        }
                }
                if (dlp != 0) {
                        ret = load_dylinker(dlp, map, thr_act,
                                depth, result, clean_regions);
                }
        }

        if (kl_addr)
                kfree(kl_addr, kl_size);

        if ((ret == LOAD_SUCCESS) && (depth == 1) &&
                                (result->thread_count == 0))
                ret = LOAD_FAILURE;
        if (ret == LOAD_SUCCESS)
                ubc_map(vp);

        return(ret);
}

static
load_return_t
load_segment(
        struct segment_command  *scp,
        void                    *pager,
        unsigned long           pager_offset,
        unsigned long           macho_size,
        unsigned long           end_of_file,
        vm_map_t                map,
        load_result_t           *result
)
{
        kern_return_t           ret;
        vm_offset_t             map_addr, map_offset;
        vm_size_t               map_size, seg_size, delta_size;
        caddr_t                 tmp;
        vm_prot_t               initprot;
        vm_prot_t               maxprot;

        /*
         * Make sure what we get from the file is really ours (as specified
         * by macho_size).
         */
        if (scp->fileoff + scp->filesize > macho_size)
                return (LOAD_BADMACHO);

        seg_size = round_page_32(scp->vmsize);
        if (seg_size == 0)
                return(LOAD_SUCCESS);

        /*
         *      Round sizes to page size.
         */
        map_size = round_page_32(scp->filesize);
        map_addr = trunc_page_32(scp->vmaddr);

        map_offset = pager_offset + scp->fileoff;

        if (map_size > 0) {
                initprot = (scp->initprot) & VM_PROT_ALL;
                maxprot = (scp->maxprot) & VM_PROT_ALL;
                /*
                 *      Map a copy of the file into the address space.
                 */
                ret = vm_map(map,
                                &map_addr, map_size, (vm_offset_t)0, FALSE,
                                pager, map_offset, TRUE,
                                initprot, maxprot,
                                VM_INHERIT_DEFAULT);
                if (ret != KERN_SUCCESS)
                        return(LOAD_NOSPACE);

                /*
                 *      If the file didn't end on a page boundary,
                 *      we need to zero the leftover.
                 */
                delta_size = map_size - scp->filesize;
#if FIXME
                if (delta_size > 0) {
                        vm_offset_t     tmp;

                        ret = vm_allocate(kernel_map, &tmp, delta_size, TRUE);
                        if (ret != KERN_SUCCESS)
                                return(LOAD_RESOURCE);

                        if (copyout(tmp, map_addr + scp->filesize,
                                                                delta_size)) {
                                (void) vm_deallocate(
                                                kernel_map, tmp, delta_size);
                                return(LOAD_FAILURE);
                        }

                        (void) vm_deallocate(kernel_map, tmp, delta_size);
                }
#endif /* FIXME */
        }

        /*
         *      If the virtual size of the segment is greater
         *      than the size from the file, we need to allocate
         *      zero fill memory for the rest.
         */
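        /*
         *      This is how BSS works: a __DATA segment whose vmsize
         *      exceeds its filesize gets its tail allocated here as
         *      anonymous zero-fill pages rather than mapped from the
         *      file.
         */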
        delta_size = seg_size - map_size;
        if (delta_size > 0) {
                vm_offset_t     tmp = map_addr + map_size;

                ret = vm_allocate(map, &tmp, delta_size, FALSE);
                if (ret != KERN_SUCCESS)
                        return(LOAD_NOSPACE);
        }

        /*
         *      Set protection values. (Note: ignore errors!)
         */

        if (scp->maxprot != VM_PROT_DEFAULT) {
                (void) vm_protect(map,
                                        map_addr, seg_size,
                                        TRUE, scp->maxprot);
        }
        if (scp->initprot != VM_PROT_DEFAULT) {
                (void) vm_protect(map,
                                      map_addr, seg_size,
                                      FALSE, scp->initprot);
        }
        if ( (scp->fileoff == 0) && (scp->filesize != 0) )
                result->mach_header = map_addr;
        return(LOAD_SUCCESS);
}

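/*
 * LC_UNIXTHREAD describes the single initial thread of a Unix process
 * (register state, user stack and entry point); only one is accepted.
 * LC_THREAD, by contrast, may appear several times: each one after the
 * first creates and immediately resumes an additional thread, which is
 * why thread commands must follow the segment commands they reference.
 */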
static
load_return_t
load_unixthread(
        struct thread_command   *tcp,
        thread_act_t            thread,
        load_result_t           *result
)
{
        load_return_t   ret;
        int             customstack = 0;

        if (result->thread_count != 0)
                return (LOAD_FAILURE);

        ret = load_threadstack(thread,
                       (unsigned long *)(((vm_offset_t)tcp) +
                                sizeof(struct thread_command)),
                       tcp->cmdsize - sizeof(struct thread_command),
                       &result->user_stack,
                       &customstack);
        if (ret != LOAD_SUCCESS)
                return(ret);

        if (customstack)
                result->customstack = 1;
        else
                result->customstack = 0;
        ret = load_threadentry(thread,
                       (unsigned long *)(((vm_offset_t)tcp) +
                                sizeof(struct thread_command)),
                       tcp->cmdsize - sizeof(struct thread_command),
                       &result->entry_point);
        if (ret != LOAD_SUCCESS)
                return(ret);

        ret = load_threadstate(thread,
                       (unsigned long *)(((vm_offset_t)tcp) +
                                sizeof(struct thread_command)),
                       tcp->cmdsize - sizeof(struct thread_command));
        if (ret != LOAD_SUCCESS)
                return (ret);

        result->unixproc = TRUE;
        result->thread_count++;

        return(LOAD_SUCCESS);
}

static
load_return_t
load_thread(
        struct thread_command   *tcp,
        thread_act_t            thread,
        load_result_t           *result
)
{
        kern_return_t   kret;
        load_return_t   lret;
        task_t          task;
        int             customstack = 0;

        task = get_threadtask(thread);

        /* if the thread count is 0, use the passed-in thread (thr_act) */
        if (result->thread_count != 0) {
                kret = thread_create(task, &thread);
                if (kret != KERN_SUCCESS)
                        return(LOAD_RESOURCE);
                act_deallocate(thread);
        }

        lret = load_threadstate(thread,
                       (unsigned long *)(((vm_offset_t)tcp) +
                                sizeof(struct thread_command)),
                       tcp->cmdsize - sizeof(struct thread_command));
        if (lret != LOAD_SUCCESS)
                return (lret);

        if (result->thread_count == 0) {
                lret = load_threadstack(thread,
                                (unsigned long *)(((vm_offset_t)tcp) +
                                        sizeof(struct thread_command)),
                                tcp->cmdsize - sizeof(struct thread_command),
                                &result->user_stack,
                                &customstack);
                if (customstack)
                        result->customstack = 1;
                else
                        result->customstack = 0;

                if (lret != LOAD_SUCCESS)
                        return(lret);

                lret = load_threadentry(thread,
                                (unsigned long *)(((vm_offset_t)tcp) +
                                        sizeof(struct thread_command)),
                                tcp->cmdsize - sizeof(struct thread_command),
                                &result->entry_point);
                if (lret != LOAD_SUCCESS)
                        return(lret);
        }
        /*
         *      Resume the thread now.  Note that this means that thread
         *      commands should appear after all the load commands, to
         *      be sure they don't reference anything not yet mapped.
         */
        else
                thread_resume(thread);

        result->thread_count++;

        return(LOAD_SUCCESS);
}

static
load_return_t
load_threadstate(
        thread_t        thread,
        unsigned long   *ts,
        unsigned long   total_size
)
{
        kern_return_t   ret;
        unsigned long   size;
        int             flavor;

        /*
         *      Set the thread state.
         */

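        /*
         *      The payload of a thread command is a sequence of
         *      machine-dependent state blocks:
         *
         *              unsigned long   flavor;         state type
         *              unsigned long   count;          size in longs
         *              unsigned long   state[count];   flavor-specific data
         *
         *      repeated until total_size is exhausted.  The same layout
         *      is walked by load_threadstack() and load_threadentry()
         *      below, which pick out the stack pointer and entry point.
         */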
        while (total_size > 0) {
                flavor = *ts++;
                size = *ts++;
                /*
                 *      total_size is unsigned, so validate the entry
                 *      size before subtracting to avoid wrapping past
                 *      zero.
                 */
                if ((size + 2) * sizeof(unsigned long) > total_size)
                        return(LOAD_BADMACHO);
                total_size -= (size + 2) * sizeof(unsigned long);
                ret = thread_setstatus(thread, flavor, ts, size);
                if (ret != KERN_SUCCESS)
                        return(LOAD_FAILURE);
                ts += size;     /* ts is a (unsigned long *) */
        }
        return(LOAD_SUCCESS);
}

static
load_return_t
load_threadstack(
        thread_t        thread,
        unsigned long   *ts,
        unsigned long   total_size,
        vm_offset_t     *user_stack,
        int             *customstack
)
{
        kern_return_t   ret;
        unsigned long   size;
        int             flavor;

        while (total_size > 0) {
                flavor = *ts++;
                size = *ts++;
                /* as above: bounds-check before subtracting */
                if ((size + 2) * sizeof(unsigned long) > total_size)
                        return(LOAD_BADMACHO);
                total_size -= (size + 2) * sizeof(unsigned long);
                *user_stack = USRSTACK;
                ret = thread_userstack(thread, flavor, ts, size,
                                user_stack, customstack);
                if (ret != KERN_SUCCESS)
                        return(LOAD_FAILURE);
                ts += size;     /* ts is a (unsigned long *) */
        }
        return(LOAD_SUCCESS);
}

static
load_return_t
load_threadentry(
        thread_t        thread,
        unsigned long   *ts,
        unsigned long   total_size,
        vm_offset_t     *entry_point
)
{
        kern_return_t   ret;
        unsigned long   size;
        int             flavor;

        /*
         *      Pick the entry point out of the thread state.
         */
        *entry_point = 0;
        while (total_size > 0) {
                flavor = *ts++;
                size = *ts++;
                /* as above: bounds-check before subtracting */
                if ((size + 2) * sizeof(unsigned long) > total_size)
                        return(LOAD_BADMACHO);
                total_size -= (size + 2) * sizeof(unsigned long);
                ret = thread_entrypoint(thread, flavor, ts, size, entry_point);
                if (ret != KERN_SUCCESS)
                        return(LOAD_FAILURE);
                ts += size;     /* ts is a (unsigned long *) */
        }
        return(LOAD_SUCCESS);
}


static
load_return_t
load_dylinker(
        struct dylinker_command *lcp,
        vm_map_t                map,
        thread_act_t            thr_act,
        int                     depth,
        load_result_t           *result,
        boolean_t               clean_regions
)
{
        char                    *name;
        char                    *p;
        struct vnode            *vp;
        struct mach_header      header;
        unsigned long           file_offset;
        unsigned long           macho_size;
        vm_map_t                copy_map;
        load_result_t           myresult;
        kern_return_t           ret;
        vm_map_copy_t           tmp;
        vm_offset_t             dyl_start, map_addr;
        vm_size_t               dyl_length;
        extern pmap_t pmap_create(vm_size_t size);      /* XXX */

        name = (char *)lcp + lcp->name.offset;
        /*
         *      Check for a properly null-terminated string.
         */
        p = name;
        do {
                if (p >= (char *)lcp + lcp->cmdsize)
                        return(LOAD_BADMACHO);
        } while (*p++);

        ret = get_macho_vnode(name, &header, &file_offset, &macho_size, &vp);
        if (ret)
                return (ret);

        myresult = (load_result_t) { 0 };

        /*
         *      Load the Mach-O.
         */
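        /*
         *      The dylinker is first parsed into a throwaway map
         *      (copy_map) to learn its extent, then copied into the
         *      target map at its preferred address, or at a kernel
         *      chosen address if that range is taken, in which case
         *      the recorded entry point is slid by the same amount.
         */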

        copy_map = vm_map_create(pmap_create(macho_size),
                        get_map_min(map), get_map_max(map), TRUE);

        ret = parse_machfile(vp, copy_map, thr_act, &header,
                                file_offset, macho_size,
                                depth, &myresult, clean_regions);

        if (ret)
                goto out;

        if (get_map_nentries(copy_map) > 0) {

                dyl_start = get_map_start(copy_map);
                dyl_length = get_map_end(copy_map) - dyl_start;

                map_addr = dyl_start;
                ret = vm_allocate(map, &map_addr, dyl_length, FALSE);
                if (ret != KERN_SUCCESS) {
                        ret = vm_allocate(map, &map_addr, dyl_length, TRUE);
                }

                if (ret != KERN_SUCCESS) {
                        ret = LOAD_NOSPACE;
                        goto out;
                }
                ret = vm_map_copyin(copy_map, dyl_start, dyl_length, TRUE,
                                &tmp);
                if (ret != KERN_SUCCESS) {
                        (void) vm_map_remove(map,
                                             map_addr,
                                             map_addr + dyl_length,
                                             VM_MAP_NO_FLAGS);
                        goto out;
                }

                ret = vm_map_copy_overwrite(map, map_addr, tmp, FALSE);
                if (ret != KERN_SUCCESS) {
                        vm_map_copy_discard(tmp);
                        (void) vm_map_remove(map,
                                             map_addr,
                                             map_addr + dyl_length,
                                             VM_MAP_NO_FLAGS);
                        goto out;
                }

                if (map_addr != dyl_start)
                        myresult.entry_point += (map_addr - dyl_start);
        } else
                ret = LOAD_FAILURE;

        if (ret == LOAD_SUCCESS) {
                result->dynlinker = TRUE;
                result->entry_point = myresult.entry_point;
                ubc_map(vp);
        }
out:
        vm_map_deallocate(copy_map);

        vrele(vp);
        return (ret);
}

static
load_return_t
get_macho_vnode(
        char                    *path,
        struct mach_header      *mach_header,
        unsigned long           *file_offset,
        unsigned long           *macho_size,
        struct vnode            **vpp
)
{
        struct vnode            *vp;
        struct vattr            attr, *atp;
        struct nameidata        nid, *ndp;
        struct proc             *p = current_proc();    /* XXXX */
        boolean_t               is_fat;
        struct fat_arch         fat_arch;
        int                     error = LOAD_SUCCESS;
        int                     resid;
        union {
                struct mach_header      mach_header;
                struct fat_header       fat_header;
                char                    pad[512];
        } header;
        off_t                   fsize = (off_t)0;
        struct ucred            *cred = p->p_ucred;
        int                     err2;

        ndp = &nid;
        atp = &attr;

        /* initialize the namei data to point at the program's file name */
        NDINIT(ndp, LOOKUP, FOLLOW | LOCKLEAF, UIO_SYSSPACE, path, p);

        if (error = namei(ndp)) {
                if (error == ENOENT)
                        error = LOAD_ENOENT;
                else
                        error = LOAD_FAILURE;
                return(error);
        }

        vp = ndp->ni_vp;

        /* check for a regular file */
        if (vp->v_type != VREG) {
                error = LOAD_PROTECT;
                goto bad1;
        }

        /* get attributes */
        if (error = VOP_GETATTR(vp, &attr, cred, p)) {
                error = LOAD_FAILURE;
                goto bad1;
        }

        /* check the mount point */
        if (vp->v_mount->mnt_flag & MNT_NOEXEC) {
                error = LOAD_PROTECT;
                goto bad1;
        }

        if ((vp->v_mount->mnt_flag & MNT_NOSUID) || (p->p_flag & P_TRACED))
                atp->va_mode &= ~(VSUID | VSGID);

        /* check access; even for root at least one exec bit must be on */
        if (error = VOP_ACCESS(vp, VEXEC, cred, p)) {
                error = LOAD_PROTECT;
                goto bad1;
        }
        if ((atp->va_mode & (S_IXUSR | S_IXGRP | S_IXOTH)) == 0) {
                error = LOAD_PROTECT;
                goto bad1;
        }

        /* hold the vnode for the IO */
        if (UBCINFOEXISTS(vp) && !ubc_hold(vp)) {
                error = LOAD_ENOENT;
                goto bad1;
        }

        /* try to open it */
        if (error = VOP_OPEN(vp, FREAD, cred, p)) {
                error = LOAD_PROTECT;
                ubc_rele(vp);
                goto bad1;
        }

        if (error = vn_rdwr(UIO_READ, vp, (caddr_t)&header, sizeof(header), 0,
            UIO_SYSSPACE, IO_NODELOCKED, cred, &resid, p)) {
                error = LOAD_IOERROR;
                goto bad2;
        }

        if (header.mach_header.magic == MH_MAGIC)
            is_fat = FALSE;
        else if (header.fat_header.magic == FAT_MAGIC ||
                 header.fat_header.magic == FAT_CIGAM)
            is_fat = TRUE;
        else {
            error = LOAD_BADMACHO;
            goto bad2;
        }

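        /*
         *      A fat binary is a container whose header lists one
         *      (offset, size) slice per architecture.  When we see one,
         *      fatfile_getarch() selects the slice matching this CPU,
         *      and file_offset/macho_size are set to describe just that
         *      slice of the file.
         */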
        if (is_fat) {
                /* Look up our architecture in the fat file. */
                error = fatfile_getarch(vp, (vm_offset_t)(&header.fat_header), &fat_arch);
                if (error != LOAD_SUCCESS)
                        goto bad2;

                /* Read the Mach-O header out of it. */
                error = vn_rdwr(UIO_READ, vp, (caddr_t)&header.mach_header,
                                sizeof(header.mach_header), fat_arch.offset,
                                UIO_SYSSPACE, IO_NODELOCKED, cred, &resid, p);
                if (error) {
                        error = LOAD_IOERROR;
                        goto bad2;
                }

                /* Is this really a Mach-O? */
                if (header.mach_header.magic != MH_MAGIC) {
                        error = LOAD_BADMACHO;
                        goto bad2;
                }

                *file_offset = fat_arch.offset;
                *macho_size = fsize = fat_arch.size;
        } else {

                *file_offset = 0;
                *macho_size = fsize = attr.va_size;
        }

        *mach_header = header.mach_header;
        *vpp = vp;
        if (UBCISVALID(vp))
                ubc_setsize(vp, fsize); /* XXX why? */

        VOP_UNLOCK(vp, 0, p);
        ubc_rele(vp);
        return (error);

bad2:
        VOP_UNLOCK(vp, 0, p);
        err2 = VOP_CLOSE(vp, FREAD, cred, p);
        ubc_rele(vp);
        vrele(vp);
        return (error);

bad1:
        vput(vp);
        return(error);
}

This page is part of the FreeBSD/Linux Kernel Cross-Reference, and was automatically generated using a modified version of the LXR engine.