FreeBSD/Linux Kernel Cross Reference
sys/arm/arm/elf_trampoline.c


/*-
 * Copyright (c) 2005 Olivier Houchard.  All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
 * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
 * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
 * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */
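
/*
 * Overview (editor's summary of the code below): this is the ARM kernel
 * trampoline, a small freestanding loader linked with an embedded kernel
 * image (kernel_start/kernel_end).  Running with the MMU off, it copies
 * itself out of flash if necessary, gunzips the embedded kernel when
 * KZIP is configured, parses the kernel's ELF headers, copies the
 * PT_LOAD segments and symbol tables into place, and finally jumps to
 * the kernel entry point.
 */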

#include <sys/cdefs.h>
__FBSDID("$FreeBSD$");
#include <machine/asm.h>
#include <sys/param.h>
#include <sys/elf32.h>
#include <sys/inflate.h>
#include <machine/elf.h>
#include <machine/pte.h>
#include <machine/cpufunc.h>
#include <machine/armreg.h>

#include <stdlib.h>

/*
 * Since we are compiled outside of the normal kernel build process, we
 * need to include opt_global.h manually.
 */
#include "opt_global.h"
#include "opt_kernname.h"

extern char kernel_start[];
extern char kernel_end[];

extern void *_end;

void _start(void);
void __start(void);
void _startC(void);

#define GZ_HEAD 0xa

#ifdef CPU_ARM7TDMI
#define cpu_idcache_wbinv_all   arm7tdmi_cache_flushID
#elif defined(CPU_ARM8)
#define cpu_idcache_wbinv_all   arm8_cache_purgeID
#elif defined(CPU_ARM9)
#define cpu_idcache_wbinv_all   arm9_idcache_wbinv_all
#elif defined(CPU_ARM9E)
#define cpu_idcache_wbinv_all   armv5_ec_idcache_wbinv_all
#elif defined(CPU_ARM10)
#define cpu_idcache_wbinv_all   arm10_idcache_wbinv_all
#elif defined(CPU_SA110) || defined(CPU_SA1110) || defined(CPU_SA1100) || \
    defined(CPU_IXP12X0)
#define cpu_idcache_wbinv_all   sa1_cache_purgeID
#elif defined(CPU_XSCALE_80200) || defined(CPU_XSCALE_80321) || \
    defined(CPU_XSCALE_PXA2X0) || defined(CPU_XSCALE_IXP425) || \
    defined(CPU_XSCALE_80219)
#define cpu_idcache_wbinv_all   xscale_cache_purgeID
#elif defined(CPU_XSCALE_81342)
#define cpu_idcache_wbinv_all   xscalec3_cache_purgeID
#endif
#ifdef CPU_XSCALE_81342
#define cpu_l2cache_wbinv_all   xscalec3_l2cache_purge
#else
#define cpu_l2cache_wbinv_all()
#endif

int     arm_picache_size;
int     arm_picache_line_size;
int     arm_picache_ways;

int     arm_pdcache_size;       /* and unified */
int     arm_pdcache_line_size = 32;
int     arm_pdcache_ways;

int     arm_pcache_type;
int     arm_pcache_unified;

int     arm_dcache_align;
int     arm_dcache_align_mask;

/*
 * Additional cache information local to this file.  Log2 of some of
 * the above numbers.
 */
static int      arm_dcache_l2_nsets;
static int      arm_dcache_l2_assoc;
static int      arm_dcache_l2_linesize;

int block_userspace_access = 0;
extern int arm9_dcache_sets_inc;
extern int arm9_dcache_sets_max;
extern int arm9_dcache_index_max;
extern int arm9_dcache_index_inc;

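/*
 * Minimal freestanding replacements for memcpy() and bzero(); the
 * trampoline cannot pull in libkern.  Note that the word-at-a-time
 * fast path in memcpy() is disabled by the literal 0 in its condition,
 * so every copy takes the byte loop.
 */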
static __inline void *
memcpy(void *dst, const void *src, int len)
{
        const char *s = src;
        char *d = dst;

        while (len) {
                if (0 && len >= 4 && !((vm_offset_t)d & 3) &&
                    !((vm_offset_t)s & 3)) {
                        *(uint32_t *)d = *(uint32_t *)s;
                        s += 4;
                        d += 4;
                        len -= 4;
                } else {
                        *d++ = *s++;
                        len--;
                }
        }
        return (dst);
}

static __inline void
bzero(void *addr, int count)
{
        char *tmp = (char *)addr;

        while (count > 0) {
                if (count >= 4 && !((vm_offset_t)tmp & 3)) {
                        *(uint32_t *)tmp = 0;
                        tmp += 4;
                        count -= 4;
                } else {
                        *tmp = 0;
                        tmp++;
                        count--;
                }
        }
}

static void arm9_setup(void);

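/*
 * C entry point, reached from the assembly startup stub with the MMU
 * off.  Carve out a stack above _end (leaving room for the inflated
 * kernel and two L1 page tables when KZIP is configured), compute a
 * physical continuation address, enable the caches, write buffer, and
 * branch prediction while keeping the MMU disabled, then call
 * __start().
 */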
void
_startC(void)
{
        int physaddr = KERNPHYSADDR;
        int tmp1;
        unsigned int sp = ((unsigned int)&_end & ~3) + 4;
#if defined(FLASHADDR) && defined(LOADERRAMADDR)
        unsigned int pc;

        __asm __volatile("adr %0, _start\n"
            : "=r" (pc));
        if ((FLASHADDR > LOADERRAMADDR && pc >= FLASHADDR) ||
            (FLASHADDR < LOADERRAMADDR && pc < LOADERRAMADDR)) {
                /*
                 * We're running from flash, so just copy the whole thing
                 * from flash to memory.
                 * This is far from optimal: we could do the relocation or
                 * the unzipping directly from flash to memory to avoid this
                 * needless copy, but it would require knowing the flash
                 * physical address.
                 */
                unsigned int target_addr;
                unsigned int tmp_sp;

                target_addr = (unsigned int)&_start - PHYSADDR + LOADERRAMADDR;
                tmp_sp = target_addr + 0x100000 +
                    (unsigned int)&_end - (unsigned int)&_start;
                memcpy((char *)target_addr, (char *)pc,
                    (unsigned int)&_end - (unsigned int)&_start);
                /* Temporarily set the sp and jump to the new location. */
                __asm __volatile(
                    "mov sp, %1\n"
                    "mov pc, %0\n"
                    : : "r" (target_addr), "r" (tmp_sp));
        }
#endif
#ifdef KZIP
        sp += KERNSIZE + 0x100;
        sp &= ~(L1_TABLE_SIZE - 1);
        sp += 2 * L1_TABLE_SIZE;
#endif
        sp += 1024 * 1024; /* Should be enough for a stack. */

        __asm __volatile("adr %0, 2f\n"
                         "bic %0, %0, #0xff000000\n"
                         "and %1, %1, #0xff000000\n"
                         "orr %0, %0, %1\n"
                         "mrc p15, 0, %1, c1, c0, 0\n"
                         "bic %1, %1, #1\n" /* Disable MMU */
                         "orr %1, %1, #(4 | 8)\n" /* Add DC enable,
                                                     WBUF enable */
                         "orr %1, %1, #0x1000\n" /* Add IC enable */
                         "orr %1, %1, #(0x800)\n" /* BPRD enable */
                         "mcr p15, 0, %1, c1, c0, 0\n"
                         "nop\n"
                         "nop\n"
                         "nop\n"
                         "mov pc, %0\n"
                         "2: nop\n"
                         "mov sp, %2\n"
                         : "=r" (tmp1), "+r" (physaddr), "+r" (sp));
#ifndef KZIP
#ifdef CPU_ARM9
        /* So that idcache_wbinv works. */
        if ((cpufunc_id() & 0x0000f000) == 0x00009000)
                arm9_setup();
#endif
        cpu_idcache_wbinv_all();
        cpu_l2cache_wbinv_all();
#endif
        __start();
}

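/*
 * Read the CP15 cache type register and decode the L1 instruction-
 * and data-cache geometry into the arm_p{i,d}cache_* variables above.
 * On CPUs that do not implement the register, the main ID register is
 * returned instead; the ctype == cpufunc_id() test catches that case
 * and leaves the defaults in place.
 */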
static void
get_cachetype_cp15(void)
{
        u_int ctype, isize, dsize;
        u_int multiplier;

        __asm __volatile("mrc p15, 0, %0, c0, c0, 1"
            : "=r" (ctype));

        /*
         * ...and thus spake the ARM ARM:
         *
         * If an <opcode2> value corresponding to an unimplemented or
         * reserved ID register is encountered, the System Control
         * processor returns the value of the main ID register.
         */
        if (ctype == cpufunc_id())
                goto out;

        if ((ctype & CPU_CT_S) == 0)
                arm_pcache_unified = 1;

        /*
         * If you want to know how this code works, go read the ARM ARM.
         */

        arm_pcache_type = CPU_CT_CTYPE(ctype);
        if (arm_pcache_unified == 0) {
                isize = CPU_CT_ISIZE(ctype);
                multiplier = (isize & CPU_CT_xSIZE_M) ? 3 : 2;
                arm_picache_line_size = 1U << (CPU_CT_xSIZE_LEN(isize) + 3);
                if (CPU_CT_xSIZE_ASSOC(isize) == 0) {
                        if (isize & CPU_CT_xSIZE_M)
                                arm_picache_line_size = 0; /* not present */
                        else
                                arm_picache_ways = 1;
                } else {
                        arm_picache_ways = multiplier <<
                            (CPU_CT_xSIZE_ASSOC(isize) - 1);
                }
                arm_picache_size = multiplier << (CPU_CT_xSIZE_SIZE(isize) + 8);
        }

        dsize = CPU_CT_DSIZE(ctype);
        multiplier = (dsize & CPU_CT_xSIZE_M) ? 3 : 2;
        arm_pdcache_line_size = 1U << (CPU_CT_xSIZE_LEN(dsize) + 3);
        if (CPU_CT_xSIZE_ASSOC(dsize) == 0) {
                if (dsize & CPU_CT_xSIZE_M)
                        arm_pdcache_line_size = 0; /* not present */
                else
                        arm_pdcache_ways = 1;
        } else {
                arm_pdcache_ways = multiplier <<
                    (CPU_CT_xSIZE_ASSOC(dsize) - 1);
        }
        arm_pdcache_size = multiplier << (CPU_CT_xSIZE_SIZE(dsize) + 8);

        arm_dcache_align = arm_pdcache_line_size;

        arm_dcache_l2_assoc = CPU_CT_xSIZE_ASSOC(dsize) + multiplier - 2;
        arm_dcache_l2_linesize = CPU_CT_xSIZE_LEN(dsize) + 3;
        arm_dcache_l2_nsets = 6 + CPU_CT_xSIZE_SIZE(dsize) -
            CPU_CT_xSIZE_ASSOC(dsize) - CPU_CT_xSIZE_LEN(dsize);
 out:
        arm_dcache_align_mask = arm_dcache_align - 1;
}

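/*
 * Compute the increments and limits used by the ARM9 set/way dcache
 * operations (arm9_dcache_sets_* / arm9_dcache_index_*) from the
 * geometry decoded by get_cachetype_cp15().
 */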
static void
arm9_setup(void)
{

        get_cachetype_cp15();
        arm9_dcache_sets_inc = 1U << arm_dcache_l2_linesize;
        arm9_dcache_sets_max = (1U << (arm_dcache_l2_linesize +
            arm_dcache_l2_nsets)) - arm9_dcache_sets_inc;
        arm9_dcache_index_inc = 1U << (32 - arm_dcache_l2_assoc);
        arm9_dcache_index_max = 0U - arm9_dcache_index_inc;
}

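/*
 * KZIP support: a trivial bump allocator and the I/O callbacks needed
 * by the in-kernel inflate() to decompress a gzip'ed kernel image.
 */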
#ifdef KZIP
static unsigned char *orig_input, *i_input, *i_output;

static u_int memcnt;            /* Memory allocated: blocks */
static size_t memtot;           /* Memory allocated: bytes */

/*
 * Library functions required by inflate().
 */

#define MEMSIZ 0x8000

/*
 * Allocate memory block.
 */
unsigned char *
kzipmalloc(int size)
{
        void *ptr;
        static u_char mem[MEMSIZ];

        if (memtot + size > MEMSIZ)
                return NULL;
        ptr = mem + memtot;
        memtot += size;
        memcnt++;
        return ptr;
}

/*
 * Free allocated memory block.
 */
void
kzipfree(void *ptr)
{
        memcnt--;
        if (!memcnt)
                memtot = 0;
}

void
putstr(char *dummy)
{
}

static int
input(void *dummy)
{
        if ((size_t)(i_input - orig_input) >= KERNCOMPSIZE) {
                return (GZ_EOF);
        }
        return *i_input++;
}

static int
output(void *dummy, unsigned char *ptr, unsigned long len)
{

        memcpy(i_output, ptr, len);
        i_output += len;
        return (0);
}

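/*
 * Decompress the gzip'ed kernel at "kernel" to "startaddr".  GZ_HEAD
 * is the size of the fixed gzip header; byte 3 is the gzip FLG byte,
 * so the 0x18 test (FNAME|FCOMMENT) skips one trailing NUL-terminated
 * string when an optional name or comment field is present.  Returns
 * the first word-aligned address past the decompressed image.
 */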
static void *
inflate_kernel(void *kernel, void *startaddr)
{
        struct inflate infl;
        char slide[GZ_WSIZE];

        orig_input = kernel;
        memcnt = memtot = 0;
        i_input = (unsigned char *)kernel + GZ_HEAD;
        if (((char *)kernel)[3] & 0x18) {
                while (*i_input)
                        i_input++;
                i_input++;
        }
        i_output = startaddr;
        bzero(&infl, sizeof(infl));
        infl.gz_input = input;
        infl.gz_output = output;
        infl.gz_slide = slide;
        inflate(&infl);
        return ((char *)(((vm_offset_t)i_output & ~3) + 4));
}

#endif

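/*
 * Parse the ELF image at kstart.  With d == 0, only compute and return
 * the first free address past the loaded kernel and its symbol tables.
 * With d != 0, install the kernel for real: copy the PT_LOAD segments
 * to their physical addresses, zero each segment's bss tail, append
 * the symbol and string tables (announced through MAGIC_TRAMP_NUMBER
 * and the ssym/esym words stored at curaddr), turn the MMU off, and
 * jump to the relocated entry point.  The trailing asm defines the
 * func_end label marking the end of the code that __start() relocates
 * before making the final call.
 */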
void *
load_kernel(unsigned int kstart, unsigned int curaddr, unsigned int func_end,
    int d)
{
        Elf32_Ehdr *eh;
        Elf32_Phdr phdr[64] /* XXX */, *php;
        Elf32_Shdr shdr[64] /* XXX */;
        int i, j;
        void *entry_point;
        int symtabindex = -1;
        int symstrindex = -1;
        vm_offset_t lastaddr = 0;
        Elf_Addr ssym = 0, esym = 0;
        Elf_Dyn *dp;

        eh = (Elf32_Ehdr *)kstart;
        ssym = esym = 0;
        entry_point = (void *)eh->e_entry;
        memcpy(phdr, (void *)(kstart + eh->e_phoff),
            eh->e_phnum * sizeof(phdr[0]));

        /* Determine lastaddr. */
        for (i = 0; i < eh->e_phnum; i++) {
                if (lastaddr < (phdr[i].p_vaddr - KERNVIRTADDR + curaddr
                    + phdr[i].p_memsz))
                        lastaddr = phdr[i].p_vaddr - KERNVIRTADDR +
                            curaddr + phdr[i].p_memsz;
        }

        /* Save the symbol tables, as they're about to be scratched. */
        memcpy(shdr, (void *)(kstart + eh->e_shoff),
            sizeof(*shdr) * eh->e_shnum);
        if (eh->e_shnum * eh->e_shentsize != 0 &&
            eh->e_shoff != 0) {
                for (i = 0; i < eh->e_shnum; i++) {
                        if (shdr[i].sh_type == SHT_SYMTAB) {
                                for (j = 0; j < eh->e_phnum; j++) {
                                        if (phdr[j].p_type == PT_LOAD &&
                                            shdr[i].sh_offset >=
                                            phdr[j].p_offset &&
                                            (shdr[i].sh_offset +
                                             shdr[i].sh_size <=
                                             phdr[j].p_offset +
                                             phdr[j].p_filesz)) {
                                                shdr[i].sh_offset = 0;
                                                shdr[i].sh_size = 0;
                                                j = eh->e_phnum;
                                        }
                                }
                                if (shdr[i].sh_offset != 0 &&
                                    shdr[i].sh_size != 0) {
                                        symtabindex = i;
                                        symstrindex = shdr[i].sh_link;
                                }
                        }
                }
                func_end = roundup(func_end, sizeof(long));
                if (symtabindex >= 0 && symstrindex >= 0) {
                        ssym = lastaddr;
                        if (d) {
                                memcpy((void *)func_end, (void *)(
                                    shdr[symtabindex].sh_offset + kstart),
                                    shdr[symtabindex].sh_size);
                                memcpy((void *)(func_end +
                                    shdr[symtabindex].sh_size),
                                    (void *)(shdr[symstrindex].sh_offset +
                                    kstart), shdr[symstrindex].sh_size);
                        } else {
                                lastaddr += shdr[symtabindex].sh_size;
                                lastaddr = roundup(lastaddr,
                                    sizeof(shdr[symtabindex].sh_size));
                                lastaddr += sizeof(shdr[symstrindex].sh_size);
                                lastaddr += shdr[symstrindex].sh_size;
                                lastaddr = roundup(lastaddr,
                                    sizeof(shdr[symstrindex].sh_size));
                        }
                }
        }
        if (!d)
                return ((void *)lastaddr);

        j = eh->e_phnum;
        for (i = 0; i < j; i++) {
                volatile char c;

                if (phdr[i].p_type != PT_LOAD)
                        continue;
                memcpy((void *)(phdr[i].p_vaddr - KERNVIRTADDR + curaddr),
                    (void *)(kstart + phdr[i].p_offset), phdr[i].p_filesz);
                /* Clean space from oversized segments, e.g. bss. */
                if (phdr[i].p_filesz < phdr[i].p_memsz)
                        bzero((void *)(phdr[i].p_vaddr - KERNVIRTADDR +
                            curaddr + phdr[i].p_filesz), phdr[i].p_memsz -
                            phdr[i].p_filesz);
        }
        /* Now grab the symbol tables. */
        if (symtabindex >= 0 && symstrindex >= 0) {
                *(Elf_Size *)lastaddr =
                    shdr[symtabindex].sh_size;
                lastaddr += sizeof(shdr[symtabindex].sh_size);
                memcpy((void *)lastaddr,
                    (void *)func_end,
                    shdr[symtabindex].sh_size);
                lastaddr += shdr[symtabindex].sh_size;
                lastaddr = roundup(lastaddr,
                    sizeof(shdr[symtabindex].sh_size));
                *(Elf_Size *)lastaddr =
                    shdr[symstrindex].sh_size;
                lastaddr += sizeof(shdr[symstrindex].sh_size);
                memcpy((void *)lastaddr,
                    (void *)(func_end +
                            shdr[symtabindex].sh_size),
                    shdr[symstrindex].sh_size);
                lastaddr += shdr[symstrindex].sh_size;
                lastaddr = roundup(lastaddr,
                    sizeof(shdr[symstrindex].sh_size));
                *(Elf_Addr *)curaddr = MAGIC_TRAMP_NUMBER;
                *((Elf_Addr *)curaddr + 1) = ssym - curaddr + KERNVIRTADDR;
                *((Elf_Addr *)curaddr + 2) = lastaddr - curaddr + KERNVIRTADDR;
        } else
                *(Elf_Addr *)curaddr = 0;
        /* Invalidate the instruction cache and drain the write buffer. */
        __asm __volatile("mcr p15, 0, %0, c7, c5, 0\n"
                         "mcr p15, 0, %0, c7, c10, 4\n"
                         : : "r" (curaddr));
        __asm __volatile("mrc p15, 0, %0, c1, c0, 0\n"
            "bic %0, %0, #1\n" /* Disable MMU */
            "mcr p15, 0, %0, c1, c0, 0\n"
            : "=r" (ssym));
        /* Jump to the entry point. */
        ((void (*)(void))(entry_point - KERNVIRTADDR + curaddr))();
        __asm __volatile(".globl func_end\n"
            "func_end:");
}

extern char func_end[];

/*
 * Just define PMAP_DOMAIN_KERNEL instead of including the whole VM
 * headers set.
 */
#define PMAP_DOMAIN_KERNEL      0

int __hack;
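
/*
 * Build a minimal L1 page table at pt_addr that section-maps
 * [physstart, physend) one-to-one, cacheable (plus bufferable when
 * write_back is set), load it into the TTB, program the domain
 * access control register, and enable the MMU.
 */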
static __inline void
setup_pagetables(unsigned int pt_addr, vm_paddr_t physstart, vm_paddr_t physend,
    int write_back)
{
        unsigned int *pd = (unsigned int *)pt_addr;
        vm_paddr_t addr;
        int domain = (DOMAIN_CLIENT << (PMAP_DOMAIN_KERNEL * 2)) | DOMAIN_CLIENT;
        int tmp;

        bzero(pd, L1_TABLE_SIZE);
        for (addr = physstart; addr < physend; addr += L1_S_SIZE) {
                pd[addr >> L1_S_SHIFT] = L1_TYPE_S|L1_S_C|L1_S_AP(AP_KRW)|
                    L1_S_DOM(PMAP_DOMAIN_KERNEL) | addr;
                if (write_back)
                        pd[addr >> L1_S_SHIFT] |= L1_S_B;
        }
        /* XXX: See below. */
        if (0xfff00000 < physstart || 0xfff00000 > physend)
                pd[0xfff00000 >> L1_S_SHIFT] = L1_TYPE_S|L1_S_AP(AP_KRW)|
                    L1_S_DOM(PMAP_DOMAIN_KERNEL)|physstart;
        __asm __volatile("mcr p15, 0, %1, c2, c0, 0\n" /* set the TTB */
                         "mcr p15, 0, %1, c8, c7, 0\n" /* flush the TLB */
                         "mcr p15, 0, %2, c3, c0, 0\n" /* set the DACR */
                         "mrc p15, 0, %0, c1, c0, 0\n"
                         "orr %0, %0, #1\n" /* MMU_ENABLE */
                         "mcr p15, 0, %0, c1, c0, 0\n"
                         "mrc p15, 0, %0, c2, c0, 0\n" /* CPWAIT */
                         "mov r0, r0\n"
                         "sub pc, pc, #4\n" :
                         "=r" (tmp) : "r" (pd), "r" (domain));

        /*
         * XXX: This is the most stupid workaround I've ever written.
         * For some reason, the KB9202 won't boot the kernel unless
         * we access an address which is not in the
         * 0x20000000 - 0x20ffffff range. I hope I'll understand
         * what's going on later.
         */
        __hack = *(volatile int *)0xfffff21c;
}

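/*
 * Main trampoline logic, entered from _startC() with caches on and
 * the MMU off.  If the embedded image starts with the gzip magic
 * (0x1f 0x8b), it is inflated past _end first; load_kernel(..., 0)
 * then sizes the loaded kernel so page tables and a stack can be
 * placed above it.  The code between load_kernel() and func_end is
 * copied out of the way and re-entered through do_call() (defined
 * outside this file) to perform the real load and jump into the
 * kernel.
 */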
void
__start(void)
{
        void *curaddr;
        void *dst, *altdst;
        char *kernel = (char *)&kernel_start;
        int sp;
        int pt_addr;

        __asm __volatile("mov %0, pc" :
            "=r" (curaddr));
        curaddr = (void *)((unsigned int)curaddr & 0xfff00000);
#ifdef KZIP
        if (*kernel == 0x1f && kernel[1] == 0x8b) {
                pt_addr = (((int)&_end + KERNSIZE + 0x100) &
                    ~(L1_TABLE_SIZE - 1)) + L1_TABLE_SIZE;

#ifdef CPU_ARM9
                /* So that idcache_wbinv works. */
                if ((cpufunc_id() & 0x0000f000) == 0x00009000)
                        arm9_setup();
#endif
                setup_pagetables(pt_addr, (vm_paddr_t)curaddr,
                    (vm_paddr_t)curaddr + 0x10000000, 1);
                /* Gzipped kernel. */
                dst = inflate_kernel(kernel, &_end);
                kernel = (char *)&_end;
                altdst = 4 + load_kernel((unsigned int)kernel,
                    (unsigned int)curaddr,
                    (unsigned int)&func_end + 800, 0);
                if (altdst > dst)
                        dst = altdst;
                cpu_idcache_wbinv_all();
                cpu_l2cache_wbinv_all();
                __asm __volatile("mrc p15, 0, %0, c1, c0, 0\n"
                    "bic %0, %0, #1\n" /* Disable MMU */
                    "mcr p15, 0, %0, c1, c0, 0\n"
                    : "=r" (pt_addr));
        } else
#endif
                dst = 4 + load_kernel((unsigned int)&kernel_start,
                    (unsigned int)curaddr,
                    (unsigned int)&func_end, 0);
        dst = (void *)((vm_offset_t)dst & ~3);
        pt_addr = ((unsigned int)dst & ~(L1_TABLE_SIZE - 1)) + L1_TABLE_SIZE;
        setup_pagetables(pt_addr, (vm_paddr_t)curaddr,
            (vm_paddr_t)curaddr + 0x10000000, 0);
        sp = pt_addr + L1_TABLE_SIZE + 8192;
        sp = sp & ~3;
        dst = (void *)(sp + 4);
        memcpy((void *)dst, (void *)&load_kernel, (unsigned int)&func_end -
            (unsigned int)&load_kernel + 800);
        do_call(dst, kernel, dst + (unsigned int)(&func_end) -
            (unsigned int)(&load_kernel) + 800, sp);
}