/*-
 * Copyright (c) 2005 Olivier Houchard. All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
 * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
 * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
 * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */

#include <sys/cdefs.h>
__FBSDID("$FreeBSD: releng/9.0/sys/arm/arm/elf_trampoline.c 214648 2010-11-01 21:04:23Z cognet $");
#include <machine/asm.h>
#include <sys/param.h>
#include <sys/elf32.h>
#include <sys/inflate.h>
#include <machine/elf.h>
#include <machine/pte.h>
#include <machine/cpufunc.h>
#include <machine/armreg.h>

/*
 * Since we are compiled outside of the normal kernel build process, we
 * need to include opt_global.h manually.
 */
#include "opt_global.h"
#include "opt_kernname.h"

extern char kernel_start[];
extern char kernel_end[];

extern void *_end;

void _start(void);
void __start(void);
void _startC(void);

#define GZ_HEAD	0xa	/* size of the fixed gzip header */

#ifdef CPU_ARM7TDMI
#define cpu_idcache_wbinv_all	arm7tdmi_cache_flushID
#elif defined(CPU_ARM8)
#define cpu_idcache_wbinv_all	arm8_cache_purgeID
#elif defined(CPU_ARM9)
#define cpu_idcache_wbinv_all	arm9_idcache_wbinv_all
#elif defined(CPU_FA526) || defined(CPU_FA626TE)
#define cpu_idcache_wbinv_all	fa526_idcache_wbinv_all
#elif defined(CPU_ARM9E)
#define cpu_idcache_wbinv_all	armv5_ec_idcache_wbinv_all
#elif defined(CPU_ARM10)
#define cpu_idcache_wbinv_all	arm10_idcache_wbinv_all
#elif defined(CPU_SA110) || defined(CPU_SA1110) || defined(CPU_SA1100) || \
    defined(CPU_IXP12X0)
#define cpu_idcache_wbinv_all	sa1_cache_purgeID
#elif defined(CPU_XSCALE_80200) || defined(CPU_XSCALE_80321) || \
    defined(CPU_XSCALE_PXA2X0) || defined(CPU_XSCALE_IXP425) || \
    defined(CPU_XSCALE_80219)
#define cpu_idcache_wbinv_all	xscale_cache_purgeID
#elif defined(CPU_XSCALE_81342)
#define cpu_idcache_wbinv_all	xscalec3_cache_purgeID
#endif
#ifdef CPU_XSCALE_81342
#define cpu_l2cache_wbinv_all	xscalec3_l2cache_purge
#elif defined(SOC_MV_KIRKWOOD) || defined(SOC_MV_DISCOVERY)
#define cpu_l2cache_wbinv_all	sheeva_l2cache_wbinv_all
#else
#define cpu_l2cache_wbinv_all()
#endif

int arm_picache_size;
int arm_picache_line_size;
int arm_picache_ways;

int arm_pdcache_size;		/* and unified */
int arm_pdcache_line_size = 32;
int arm_pdcache_ways;

int arm_pcache_type;
int arm_pcache_unified;

int arm_dcache_align;
int arm_dcache_align_mask;

/*
 * Additional cache information local to this file.  Log2 of some of the
 * above numbers.
 */
static int arm_dcache_l2_nsets;
static int arm_dcache_l2_assoc;
static int arm_dcache_l2_linesize;

int block_userspace_access = 0;
extern int arm9_dcache_sets_inc;
extern int arm9_dcache_sets_max;
extern int arm9_dcache_index_max;
extern int arm9_dcache_index_inc;
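
/*
 * Minimal freestanding replacements for the few libc routines the
 * trampoline needs; the normal kernel runtime is not available here.
 */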
static __inline void *
memcpy(void *dst, const void *src, int len)
{
	const char *s = src;
	char *d = dst;

	while (len) {
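		/*
		 * The word-at-a-time fast path below is deliberately
		 * disabled by the "0 &&", so the copy is always done
		 * bytewise.
		 */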
		if (0 && len >= 4 && !((vm_offset_t)d & 3) &&
		    !((vm_offset_t)s & 3)) {
			*(uint32_t *)d = *(uint32_t *)s;
			s += 4;
			d += 4;
			len -= 4;
		} else {
			*d++ = *s++;
			len--;
		}
	}
	return (dst);
}

static __inline void
bzero(void *addr, int count)
{
	char *tmp = (char *)addr;

	while (count > 0) {
		if (count >= 4 && !((vm_offset_t)tmp & 3)) {
			*(uint32_t *)tmp = 0;
			tmp += 4;
			count -= 4;
		} else {
			*tmp = 0;
			tmp++;
			count--;
		}
	}
}

static void arm9_setup(void);
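
/*
 * First C entry point, reached from the trampoline's assembly start code.
 * Relocates the image out of flash if necessary, carves out a temporary
 * stack above _end, disables the MMU and calls __start().
 */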
void
_startC(void)
{
	int physaddr = KERNPHYSADDR;
	int tmp1;
	unsigned int sp = ((unsigned int)&_end & ~3) + 4;
#if defined(FLASHADDR) && defined(LOADERRAMADDR)
	unsigned int pc;

	__asm __volatile("mov %0, pc\n"
	    : "=r" (pc));
	if ((FLASHADDR > LOADERRAMADDR && pc >= FLASHADDR) ||
	    (FLASHADDR < LOADERRAMADDR && pc < LOADERRAMADDR)) {
		/*
		 * We're running from flash, so just copy the whole thing
		 * from flash to memory.
		 * This is far from optimal, we could do the relocation or
		 * the unzipping directly from flash to memory to avoid this
		 * needless copy, but it would require knowing the flash
		 * physical address.
		 */
		unsigned int target_addr;
		unsigned int tmp_sp;
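		/*
		 * Note that the 0xfffff000 mask applies to the entire sum,
		 * rounding src_addr down to a 4KB boundary.
		 */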
		uint32_t src_addr = ((uint32_t)&_start - PHYSADDR +
		    FLASHADDR + (pc - FLASHADDR -
		    ((uint32_t)&_startC - PHYSADDR))) & 0xfffff000;

		target_addr = (unsigned int)&_start - PHYSADDR + LOADERRAMADDR;
		tmp_sp = target_addr + 0x100000 +
		    (unsigned int)&_end - (unsigned int)&_start;
		memcpy((char *)target_addr, (char *)src_addr,
		    (unsigned int)&_end - (unsigned int)&_start);
		/* Temporarily set the sp and jump to the new location. */
		__asm __volatile(
		    "mov sp, %1\n"
		    "mov pc, %0\n"
		    : : "r" (target_addr), "r" (tmp_sp));

	}
#endif
#ifdef KZIP
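	/*
	 * Leave room above _end for the decompressed kernel and the L1
	 * page tables that __start() will allocate before placing the
	 * temporary stack.
	 */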
	sp += KERNSIZE + 0x100;
	sp &= ~(L1_TABLE_SIZE - 1);
	sp += 2 * L1_TABLE_SIZE;
#endif
	sp += 1024 * 1024; /* Should be enough for a stack */
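
	/*
	 * Build the physical address of label "2" below by combining its
	 * low 24 bits with the top byte of physaddr, disable the MMU,
	 * enable the I/D caches, the write buffer and branch prediction,
	 * then jump to that physical address and install the new stack
	 * pointer.
	 */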
199 __asm __volatile("adr %0, 2f\n"
200 "bic %0, %0, #0xff000000\n"
201 "and %1, %1, #0xff000000\n"
202 "orr %0, %0, %1\n"
203 "mrc p15, 0, %1, c1, c0, 0\n"
204 "bic %1, %1, #1\n" /* Disable MMU */
205 "orr %1, %1, #(4 | 8)\n" /* Add DC enable,
206 WBUF enable */
207 "orr %1, %1, #0x1000\n" /* Add IC enable */
208 "orr %1, %1, #(0x800)\n" /* BPRD enable */
209
210 "mcr p15, 0, %1, c1, c0, 0\n"
211 "nop\n"
212 "nop\n"
213 "nop\n"
214 "mov pc, %0\n"
215 "2: nop\n"
216 "mov sp, %2\n"
217 : "=r" (tmp1), "+r" (physaddr), "+r" (sp));
218 #ifndef KZIP
219 #ifdef CPU_ARM9
220 /* So that idcache_wbinv works; */
221 if ((cpufunc_id() & 0x0000f000) == 0x00009000)
222 arm9_setup();
223 #endif
224 cpu_idcache_wbinv_all();
225 cpu_l2cache_wbinv_all();
226 #endif
227 __start();
228 }
229
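
/*
 * Decode the CP15 cache type register into the cache geometry variables
 * above.  On cores where this register is unimplemented the read returns
 * the main ID register value instead, in which case the defaults are
 * kept.
 */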
static void
get_cachetype_cp15(void)
{
	u_int ctype, isize, dsize;
	u_int multiplier;

	__asm __volatile("mrc p15, 0, %0, c0, c0, 1"
	    : "=r" (ctype));

	/*
	 * ...and thus spake the ARM ARM:
	 *
	 * If an <opcode2> value corresponding to an unimplemented or
	 * reserved ID register is encountered, the System Control
	 * processor returns the value of the main ID register.
	 */
	if (ctype == cpufunc_id())
		goto out;

	if ((ctype & CPU_CT_S) == 0)
		arm_pcache_unified = 1;

	/*
	 * If you want to know how this code works, go read the ARM ARM.
	 */

	arm_pcache_type = CPU_CT_CTYPE(ctype);
	if (arm_pcache_unified == 0) {
		isize = CPU_CT_ISIZE(ctype);
		multiplier = (isize & CPU_CT_xSIZE_M) ? 3 : 2;
		arm_picache_line_size = 1U << (CPU_CT_xSIZE_LEN(isize) + 3);
		if (CPU_CT_xSIZE_ASSOC(isize) == 0) {
			if (isize & CPU_CT_xSIZE_M)
				arm_picache_line_size = 0; /* not present */
			else
				arm_picache_ways = 1;
		} else {
			arm_picache_ways = multiplier <<
			    (CPU_CT_xSIZE_ASSOC(isize) - 1);
		}
		arm_picache_size = multiplier << (CPU_CT_xSIZE_SIZE(isize) + 8);
	}

	dsize = CPU_CT_DSIZE(ctype);
	multiplier = (dsize & CPU_CT_xSIZE_M) ? 3 : 2;
	arm_pdcache_line_size = 1U << (CPU_CT_xSIZE_LEN(dsize) + 3);
	if (CPU_CT_xSIZE_ASSOC(dsize) == 0) {
		if (dsize & CPU_CT_xSIZE_M)
			arm_pdcache_line_size = 0; /* not present */
		else
			arm_pdcache_ways = 1;
	} else {
		arm_pdcache_ways = multiplier <<
		    (CPU_CT_xSIZE_ASSOC(dsize) - 1);
	}
	arm_pdcache_size = multiplier << (CPU_CT_xSIZE_SIZE(dsize) + 8);

	arm_dcache_align = arm_pdcache_line_size;

	arm_dcache_l2_assoc = CPU_CT_xSIZE_ASSOC(dsize) + multiplier - 2;
	arm_dcache_l2_linesize = CPU_CT_xSIZE_LEN(dsize) + 3;
	arm_dcache_l2_nsets = 6 + CPU_CT_xSIZE_SIZE(dsize) -
	    CPU_CT_xSIZE_ASSOC(dsize) - CPU_CT_xSIZE_LEN(dsize);
out:
	arm_dcache_align_mask = arm_dcache_align - 1;
}
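
/*
 * Compute the set/index loop parameters used by the arm9 dcache wbinv
 * routines from the geometry decoded by get_cachetype_cp15().
 */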
static void
arm9_setup(void)
{

	get_cachetype_cp15();
	arm9_dcache_sets_inc = 1U << arm_dcache_l2_linesize;
	arm9_dcache_sets_max = (1U << (arm_dcache_l2_linesize +
	    arm_dcache_l2_nsets)) - arm9_dcache_sets_inc;
	arm9_dcache_index_inc = 1U << (32 - arm_dcache_l2_assoc);
	arm9_dcache_index_max = 0U - arm9_dcache_index_inc;
}

#ifdef KZIP
static unsigned char *orig_input, *i_input, *i_output;

static u_int memcnt;		/* Memory allocated: blocks */
static size_t memtot;		/* Memory allocated: bytes */
/*
 * Library functions required by inflate().
 */

#define MEMSIZ 0x8000

/*
 * Allocate memory block.
 */
unsigned char *
kzipmalloc(int size)
{
	void *ptr;
	static u_char mem[MEMSIZ];

	if (memtot + size > MEMSIZ)
		return NULL;
	ptr = mem + memtot;
	memtot += size;
	memcnt++;
	return ptr;
}

/*
 * Free allocated memory block.
 */
void
kzipfree(void *ptr)
{
	memcnt--;
	if (!memcnt)
		memtot = 0;
}
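
/*
 * Output stub referenced by the decompression code; there is no console
 * to print to at this point, so messages are discarded.
 */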
void
putstr(char *dummy)
{
}

static int
input(void *dummy)
{
	if ((size_t)(i_input - orig_input) >= KERNCOMPSIZE) {
		return (GZ_EOF);
	}
	return *i_input++;
}

static int
output(void *dummy, unsigned char *ptr, unsigned long len)
{

	memcpy(i_output, ptr, len);
	i_output += len;
	return (0);
}
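
/*
 * Decompress the gzip'd kernel at "kernel" to "startaddr" using the
 * kernel's inflate() code and return the word-aligned end of the
 * decompressed image.
 */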
static void *
inflate_kernel(void *kernel, void *startaddr)
{
	struct inflate infl;
	char slide[GZ_WSIZE];

	orig_input = kernel;
	memcnt = memtot = 0;
	i_input = (unsigned char *)kernel + GZ_HEAD;
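	/*
	 * Byte 3 of the gzip header holds the FLG field; 0x18 covers the
	 * FNAME and FCOMMENT bits, each of which announces a
	 * NUL-terminated string after the fixed header.  Only one such
	 * string is skipped here, even if both flags are set.
	 */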
	if (((char *)kernel)[3] & 0x18) {
		while (*i_input)
			i_input++;
		i_input++;
	}
	i_output = startaddr;
	bzero(&infl, sizeof(infl));
	infl.gz_input = input;
	infl.gz_output = output;
	infl.gz_slide = slide;
	inflate(&infl);
	return ((char *)(((vm_offset_t)i_output & ~3) + 4));
}

#endif
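
/*
 * Parse the ELF image at kstart.  This is called twice: with d == 0 it
 * only computes and returns the end address of the loaded image,
 * including relocated symbol tables; with d != 0 it copies the PT_LOAD
 * segments and symbol tables into place, writes the symbol table header
 * at curaddr, turns the MMU off and jumps to the kernel entry point,
 * never returning.
 */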
void *
load_kernel(unsigned int kstart, unsigned int curaddr, unsigned int func_end,
    int d)
{
	Elf32_Ehdr *eh;
	Elf32_Phdr phdr[64] /* XXX */;
	Elf32_Shdr shdr[64] /* XXX */;
	int i, j;
	void *entry_point;
	int symtabindex = -1;
	int symstrindex = -1;
	vm_offset_t lastaddr = 0;
	Elf_Addr ssym = 0;

	eh = (Elf32_Ehdr *)kstart;
	ssym = 0;
	entry_point = (void *)eh->e_entry;
	memcpy(phdr, (void *)(kstart + eh->e_phoff),
	    eh->e_phnum * sizeof(phdr[0]));

	/* Determine lastaddr. */
	for (i = 0; i < eh->e_phnum; i++) {
		if (lastaddr < (phdr[i].p_vaddr - KERNVIRTADDR + curaddr
		    + phdr[i].p_memsz))
			lastaddr = phdr[i].p_vaddr - KERNVIRTADDR +
			    curaddr + phdr[i].p_memsz;
	}

	/* Save the symbol tables, as they're about to be scratched. */
	memcpy(shdr, (void *)(kstart + eh->e_shoff),
	    sizeof(*shdr) * eh->e_shnum);
	if (eh->e_shnum * eh->e_shentsize != 0 &&
	    eh->e_shoff != 0) {
		for (i = 0; i < eh->e_shnum; i++) {
			if (shdr[i].sh_type == SHT_SYMTAB) {
				for (j = 0; j < eh->e_phnum; j++) {
					if (phdr[j].p_type == PT_LOAD &&
					    shdr[i].sh_offset >=
					    phdr[j].p_offset &&
					    (shdr[i].sh_offset +
					     shdr[i].sh_size <=
					     phdr[j].p_offset +
					     phdr[j].p_filesz)) {
						shdr[i].sh_offset = 0;
						shdr[i].sh_size = 0;
						j = eh->e_phnum;
					}
				}
				if (shdr[i].sh_offset != 0 &&
				    shdr[i].sh_size != 0) {
					symtabindex = i;
					symstrindex = shdr[i].sh_link;
				}
			}
		}
		func_end = roundup(func_end, sizeof(long));
		if (symtabindex >= 0 && symstrindex >= 0) {
			ssym = lastaddr;
			if (d) {
				memcpy((void *)func_end, (void *)(
				    shdr[symtabindex].sh_offset + kstart),
				    shdr[symtabindex].sh_size);
				memcpy((void *)(func_end +
				    shdr[symtabindex].sh_size),
				    (void *)(shdr[symstrindex].sh_offset +
				    kstart), shdr[symstrindex].sh_size);
			} else {
				lastaddr += shdr[symtabindex].sh_size;
				lastaddr = roundup(lastaddr,
				    sizeof(shdr[symtabindex].sh_size));
				lastaddr += sizeof(shdr[symstrindex].sh_size);
				lastaddr += shdr[symstrindex].sh_size;
				lastaddr = roundup(lastaddr,
				    sizeof(shdr[symstrindex].sh_size));
			}

		}
	}
	if (!d)
		return ((void *)lastaddr);

	j = eh->e_phnum;
	for (i = 0; i < j; i++) {
		volatile char c;

		if (phdr[i].p_type != PT_LOAD)
			continue;
		memcpy((void *)(phdr[i].p_vaddr - KERNVIRTADDR + curaddr),
		    (void *)(kstart + phdr[i].p_offset), phdr[i].p_filesz);
		/* Clean space from oversized segments, e.g. bss. */
		if (phdr[i].p_filesz < phdr[i].p_memsz)
			bzero((void *)(phdr[i].p_vaddr - KERNVIRTADDR +
			    curaddr + phdr[i].p_filesz), phdr[i].p_memsz -
			    phdr[i].p_filesz);
	}
	/* Now grab the symbol tables. */
	if (symtabindex >= 0 && symstrindex >= 0) {
		*(Elf_Size *)lastaddr =
		    shdr[symtabindex].sh_size;
		lastaddr += sizeof(shdr[symtabindex].sh_size);
		memcpy((void *)lastaddr,
		    (void *)func_end,
		    shdr[symtabindex].sh_size);
		lastaddr += shdr[symtabindex].sh_size;
		lastaddr = roundup(lastaddr,
		    sizeof(shdr[symtabindex].sh_size));
		*(Elf_Size *)lastaddr =
		    shdr[symstrindex].sh_size;
		lastaddr += sizeof(shdr[symstrindex].sh_size);
		memcpy((void *)lastaddr,
		    (void *)(func_end +
		    shdr[symtabindex].sh_size),
		    shdr[symstrindex].sh_size);
		lastaddr += shdr[symstrindex].sh_size;
		lastaddr = roundup(lastaddr,
		    sizeof(shdr[symstrindex].sh_size));
		*(Elf_Addr *)curaddr = MAGIC_TRAMP_NUMBER;
		*((Elf_Addr *)curaddr + 1) = ssym - curaddr + KERNVIRTADDR;
		*((Elf_Addr *)curaddr + 2) = lastaddr - curaddr + KERNVIRTADDR;
	} else
		*(Elf_Addr *)curaddr = 0;
	/* Invalidate the instruction cache. */
	__asm __volatile("mcr p15, 0, %0, c7, c5, 0\n"
	    "mcr p15, 0, %0, c7, c10, 4\n"
	    : : "r" (curaddr));
	__asm __volatile("mrc p15, 0, %0, c1, c0, 0\n"
	    "bic %0, %0, #1\n" /* MMU_ENABLE */
	    "mcr p15, 0, %0, c1, c0, 0\n"
	    : "=r" (ssym));
	/* Jump to the entry point. */
	((void (*)(void))(entry_point - KERNVIRTADDR + curaddr))();
	__asm __volatile(".globl func_end\n"
	    "func_end:");

}

extern char func_end[];
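
/*
 * do_call() is provided by the trampoline's assembly support code rather
 * than by this file; the prototype below is inferred from the call site
 * in __start().
 */
void do_call(void *, void *, void *, int);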

/* Just define it instead of including the whole VM headers set. */
#define PMAP_DOMAIN_KERNEL 0
int __hack;
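
/*
 * Build a minimal L1 translation table at pt_addr mapping
 * physstart..physend 1:1 with 1MB sections, point the TTB at it and
 * enable the MMU.
 */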
static __inline void
setup_pagetables(unsigned int pt_addr, vm_paddr_t physstart, vm_paddr_t physend,
    int write_back)
{
	unsigned int *pd = (unsigned int *)pt_addr;
	vm_paddr_t addr;
	int domain = (DOMAIN_CLIENT << (PMAP_DOMAIN_KERNEL * 2)) | DOMAIN_CLIENT;
	int tmp;

	bzero(pd, L1_TABLE_SIZE);
	for (addr = physstart; addr < physend; addr += L1_S_SIZE) {
		pd[addr >> L1_S_SHIFT] = L1_TYPE_S|L1_S_C|L1_S_AP(AP_KRW)|
		    L1_S_DOM(PMAP_DOMAIN_KERNEL) | addr;
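		/*
		 * The write-back (bufferable) bit is deliberately never
		 * set here: "write_back && 0" is always false.
		 */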
		if (write_back && 0)
			pd[addr >> L1_S_SHIFT] |= L1_S_B;
	}
	/* XXX: See below */
	if (0xfff00000 < physstart || 0xfff00000 > physend)
		pd[0xfff00000 >> L1_S_SHIFT] = L1_TYPE_S|L1_S_AP(AP_KRW)|
		    L1_S_DOM(PMAP_DOMAIN_KERNEL)|physstart;
	__asm __volatile("mcr p15, 0, %1, c2, c0, 0\n" /* set TTB */
	    "mcr p15, 0, %1, c8, c7, 0\n" /* Flush TTB */
	    "mcr p15, 0, %2, c3, c0, 0\n" /* Set DAR */
	    "mrc p15, 0, %0, c1, c0, 0\n"
	    "orr %0, %0, #1\n" /* MMU_ENABLE */
	    "mcr p15, 0, %0, c1, c0, 0\n"
	    "mrc p15, 0, %0, c2, c0, 0\n" /* CPWAIT */
	    "mov r0, r0\n"
	    "sub pc, pc, #4\n" :
	    "=r" (tmp) : "r" (pd), "r" (domain));

	/*
	 * XXX: This is the most stupid workaround I've ever written.
	 * For some reason, the KB9202 won't boot the kernel unless
	 * we access an address which is not in the
	 * 0x20000000 - 0x20ffffff range. I hope I'll understand
	 * what's going on later.
	 */
	__hack = *(volatile int *)0xfffff21c;
}
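
/*
 * Main trampoline logic: locate ourselves, decompress the kernel if it
 * is gzip'd, figure out where the loaded image will end, set up page
 * tables and a stack above that point, copy this trampoline's own code
 * there as well, and hand control to the copy via do_call() so that
 * load_kernel() can safely overwrite the currently running image.
 */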
void
__start(void)
{
	void *curaddr;
	void *dst, *altdst;
	char *kernel = (char *)&kernel_start;
	int sp;
	int pt_addr;

	__asm __volatile("mov %0, pc" :
	    "=r" (curaddr));
	curaddr = (void *)((unsigned int)curaddr & 0xfff00000);
#ifdef KZIP
	if (*kernel == 0x1f && kernel[1] == 0x8b) {
		pt_addr = (((int)&_end + KERNSIZE + 0x100) &
		    ~(L1_TABLE_SIZE - 1)) + L1_TABLE_SIZE;

#ifdef CPU_ARM9
		/* So that idcache_wbinv works. */
		if ((cpufunc_id() & 0x0000f000) == 0x00009000)
			arm9_setup();
#endif
		setup_pagetables(pt_addr, (vm_paddr_t)curaddr,
		    (vm_paddr_t)curaddr + 0x10000000, 1);
		/* Gzipped kernel */
		dst = inflate_kernel(kernel, &_end);
		kernel = (char *)&_end;
		altdst = 4 + load_kernel((unsigned int)kernel,
		    (unsigned int)curaddr,
		    (unsigned int)&func_end + 800, 0);
		if (altdst > dst)
			dst = altdst;
	} else
#endif
		dst = 4 + load_kernel((unsigned int)&kernel_start,
		    (unsigned int)curaddr,
		    (unsigned int)&func_end, 0);
	dst = (void *)(((vm_offset_t)dst & ~3));
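	/*
	 * Lay out the L1 table just above the loaded image, a stack above
	 * that, and a copy of this trampoline's code (load_kernel() up to
	 * func_end, plus some slack) above the stack.
	 */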
	pt_addr = ((unsigned int)dst & ~(L1_TABLE_SIZE - 1)) + L1_TABLE_SIZE;
	setup_pagetables(pt_addr, (vm_paddr_t)curaddr,
	    (vm_paddr_t)curaddr + 0x10000000, 0);
	sp = pt_addr + L1_TABLE_SIZE + 8192;
	sp = sp & ~3;
	dst = (void *)(sp + 4);
	memcpy((void *)dst, (void *)&load_kernel, (unsigned int)&func_end -
	    (unsigned int)&load_kernel + 800);
	do_call(dst, kernel, dst + (unsigned int)(&func_end) -
	    (unsigned int)(&load_kernel) + 800, sp);
}