/*	$NetBSD: locore.S,v 1.14 2003/04/20 16:21:40 thorpej Exp $	*/

/*-
 * Copyright 2011 Semihalf
 * Copyright (C) 1994-1997 Mark Brinicombe
 * Copyright (C) 1994 Brini
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. All advertising materials mentioning features or use of this software
 *    must display the following acknowledgement:
 *	This product includes software developed by Brini.
 * 4. The name of Brini may not be used to endorse or promote products
 *    derived from this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY BRINI ``AS IS'' AND ANY EXPRESS OR
 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
 * IN NO EVENT SHALL BRINI BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
 * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS;
 * OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,
 * WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR
 * OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF
 * ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 *
 */

#include "assym.s"
#include <sys/syscall.h>
#include <machine/asm.h>
#include <machine/armreg.h>
#include <machine/cpuconf.h>
#include <machine/pte.h>

__FBSDID("$FreeBSD: releng/10.2/sys/arm/arm/locore-v4.S 278652 2015-02-13 00:49:47Z ian $");

/*
 * Sanity check the configuration.
 * FLASHADDR and LOADERRAMADDR depend on PHYSADDR in some cases.
 * ARMv4 and ARMv5 make assumptions about where they are loaded.
 *
 * TODO: Fix the ARMv4/v5 case.
 */
#if (defined(FLASHADDR) || defined(LOADERRAMADDR) || !defined(_ARM_ARCH_6)) && \
    !defined(PHYSADDR)
#error PHYSADDR must be defined for this configuration
#endif

/* What size should this really be? It is only used by initarm(). */
#define	INIT_ARM_STACK_SIZE	(2048 * 4)

#define	CPWAIT_BRANCH							 \
	sub	pc, pc, #4

#define	CPWAIT(tmp)							 \
	mrc	p15, 0, tmp, c2, c0, 0	/* arbitrary read of CP15 */	;\
	mov	tmp, tmp		/* wait for it to complete */	;\
	CPWAIT_BRANCH			/* branch to next insn */
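
/*
 * Note: CPWAIT is the classic "wait for CP15 operations to complete"
 * idiom.  The arbitrary CP15 read stalls until the preceding mcr has
 * taken effect, the register-to-register mov consumes the result, and
 * "sub pc, pc, #4" writes the address of the next instruction to the
 * PC (the PC reads as the current instruction plus 8), which flushes
 * the prefetch pipeline so later instructions see the new CP15 state.
 */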

/*
 * This is for libkvm, and should be the address of the beginning
 * of the kernel text segment (not necessarily the same as kernbase).
 *
 * These are being phased out. Newer copies of libkvm don't need these
 * values as the information is added to the core file by inspecting
 * the running kernel.
 */
	.text
	.align	2
#ifdef PHYSADDR
.globl kernbase
.set kernbase,KERNBASE
.globl physaddr
.set physaddr,PHYSADDR
#endif

/*
 * On entry for FreeBSD boot ABI:
 *	r0 - metadata pointer or 0 (boothowto on AT91's boot2)
 *	r1 - if (r0 == 0) then metadata pointer
 * On entry for Linux boot ABI:
 *	r0 - 0
 *	r1 - machine type (passed as arg2 to initarm)
 *	r2 - Pointer to a tagged list or dtb image (phys addr) (passed as arg1 to initarm)
 *
 * For both types of boot we gather up the args, put them in a struct
 * arm_boot_params, and pass that to initarm.
 */
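/*
 * For reference, the 28 bytes built on the stack at virt_done below are
 * laid out as follows (a sketch; field names assume struct arm_boot_params
 * as declared in sys/arm/include/machdep.h):
 *
 *	offset  0: abp_size      - size of this structure (28)
 *	offset  4: abp_r0        - r0 from the boot loader
 *	offset  8: abp_r1        - r1 from the boot loader
 *	offset 12: abp_r2        - r2 from the boot loader
 *	offset 16: abp_r3        - r3 from the boot loader
 *	offset 20: abp_physaddr  - physical load address of the kernel
 *	offset 24: abp_pagetable - physical address of the initial page table
 */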
.globl btext
btext:
ASENTRY_NP(_start)
	STOP_UNWINDING		/* Can't unwind into the bootloader! */

	mov	r9, r0		/* 0 or boot mode from boot2 */
	mov	r8, r1		/* Save Machine type */
	mov	ip, r2		/* Save meta data */
	mov	fp, r3		/* Future expansion */

	/* Make sure interrupts are disabled. */
	mrs	r7, cpsr
	orr	r7, r7, #(PSR_I | PSR_F)
	msr	cpsr_c, r7

#if defined (FLASHADDR) && defined(LOADERRAMADDR)
	/* Check if we're running from flash. */
	ldr	r7, =FLASHADDR
	/*
	 * If we're running with MMU disabled, test against the
	 * physical address instead.
	 */
	mrc	p15, 0, r2, c1, c0, 0
	ands	r2, r2, #CPU_CONTROL_MMU_ENABLE
	ldreq	r6, =PHYSADDR
	ldrne	r6, =LOADERRAMADDR
	cmp	r7, r6
	bls	flash_lower
	cmp	r7, pc
	bhi	from_ram
	b	do_copy

flash_lower:
	cmp	r6, pc
	bls	from_ram
do_copy:
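	/*
	 * Copy the kernel image (_start.._edata) from where we are
	 * currently executing to its RAM address, which is r6
	 * (LOADERRAMADDR or PHYSADDR as selected above) plus the image's
	 * offset from KERNBASE, then jump to from_ram within the copy.
	 */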
	ldr	r7, =KERNBASE
	adr	r1, _start
	ldr	r0, Lreal_start
	ldr	r2, Lend
	sub	r2, r2, r0
	sub	r0, r0, r7
	add	r0, r0, r6
	mov	r4, r0
	bl	memcpy
	ldr	r0, Lram_offset
	add	pc, r4, r0
Lram_offset:	.word	from_ram-_C_LABEL(_start)
from_ram:
	nop
#endif

disable_mmu:
	/* Disable MMU for a while */
	mrc	p15, 0, r2, c1, c0, 0
	bic	r2, r2, #(CPU_CONTROL_MMU_ENABLE | CPU_CONTROL_DC_ENABLE |\
	    CPU_CONTROL_WBUF_ENABLE)
	bic	r2, r2, #(CPU_CONTROL_IC_ENABLE)
	bic	r2, r2, #(CPU_CONTROL_BPRD_ENABLE)
	mcr	p15, 0, r2, c1, c0, 0

	nop
	nop
	nop
	CPWAIT(r0)

Lunmapped:
	/*
	 * Build page table from scratch.
	 */

	/* Find the delta between VA and PA */
	adr	r0, Lpagetable
	bl	translate_va_to_pa

#ifndef _ARM_ARCH_6
	/*
	 * Some of the older ports (the various XScale, mostly) assume
	 * that the memory before the kernel is mapped, and use it for
	 * the various stacks, page tables, etc.  For those CPUs, map
	 * the first 64MiB of RAM, as it used to be.
	 */
	/*
	 * Map PA == VA
	 */
	ldr	r5, =PHYSADDR
	mov	r1, r5
	mov	r2, r5
	/* Map 64MiB, preserved over calls to build_pagetables */
	mov	r3, #64
	bl	build_pagetables

	/* Create the kernel map to jump to */
	mov	r1, r5
	ldr	r2, =(KERNBASE)
	bl	build_pagetables
	ldr	r5, =(KERNPHYSADDR)
#else
	/*
	 * Map PA == VA
	 */
	/* Find the kernel's load address */
	adr	r5, _start
	ldr	r2, =(L1_S_OFFSET)
	bic	r5, r2
	mov	r1, r5
	mov	r2, r5
	/* Map 64MiB, preserved over calls to build_pagetables */
	mov	r3, #64
	bl	build_pagetables

	/* Create the kernel map to jump to */
	mov	r1, r5
	ldr	r2, =(KERNVIRTADDR)
	bl	build_pagetables
#endif

#if defined(SOCDEV_PA) && defined(SOCDEV_VA)
	/* Create the custom map */
	ldr	r1, =SOCDEV_PA
	ldr	r2, =SOCDEV_VA
	bl	build_pagetables
#endif

#if defined(SMP)
	orr	r0, r0, #2		/* Set TTB shared memory flag */
#endif
	mcr	p15, 0, r0, c2, c0, 0	/* Set TTB */
	mcr	p15, 0, r0, c8, c7, 0	/* Flush TLB */

#if defined(CPU_ARM1136) || defined(CPU_ARM1176) || defined(CPU_CORTEXA) || defined(CPU_MV_PJ4B) || defined(CPU_KRAIT)
	mov	r0, #0
	mcr	p15, 0, r0, c13, c0, 1	/* Set ASID to 0 */
#endif

	/* Set the Domain Access register. Very important! */
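	/*
	 * The value below sets PMAP_DOMAIN_KERNEL and domain 0 to
	 * "client" (01), so the access permission bits in the page table
	 * entries are actually checked; a domain left at "no access" (00)
	 * would fault on every reference.
	 */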
	mov	r0, #((DOMAIN_CLIENT << (PMAP_DOMAIN_KERNEL*2)) | DOMAIN_CLIENT)
	mcr	p15, 0, r0, c3, c0, 0
	/*
	 * Enable MMU.
	 * On armv6 enable extended page tables, and set alignment checking
	 * to modulo-4 (CPU_CONTROL_UNAL_ENABLE) for the ldrd/strd
	 * instructions emitted by clang.
	 */
	mrc	p15, 0, r0, c1, c0, 0
#ifdef _ARM_ARCH_6
	orr	r0, r0, #(CPU_CONTROL_V6_EXTPAGE | CPU_CONTROL_UNAL_ENABLE)
	orr	r0, r0, #(CPU_CONTROL_AFLT_ENABLE)
	orr	r0, r0, #(CPU_CONTROL_AF_ENABLE)
#endif
	orr	r0, r0, #(CPU_CONTROL_MMU_ENABLE)
	mcr	p15, 0, r0, c1, c0, 0
	nop
	nop
	nop
	CPWAIT(r0)

mmu_done:
	nop
	adr	r1, .Lstart
	ldmia	r1, {r1, r2, sp}	/* Set initial stack and */
	sub	r2, r2, r1		/* get size of zero init data */
	mov	r3, #0
.L1:
	str	r3, [r1], #0x0004	/* zero the bss */
	subs	r2, r2, #4
	bgt	.L1
	ldr	pc, .Lvirt_done

virt_done:
	mov	r1, #28			/* loader info size is 28 bytes, also the second arg */
	subs	sp, sp, r1		/* allocate arm_boot_params struct on stack */
	mov	r0, sp			/* loader info pointer is first arg */
	bic	sp, sp, #7		/* align stack to 8 bytes */
	str	r1, [r0]		/* Store length of loader info */
	str	r9, [r0, #4]		/* Store r0 from boot loader */
	str	r8, [r0, #8]		/* Store r1 from boot loader */
	str	ip, [r0, #12]		/* store r2 from boot loader */
	str	fp, [r0, #16]		/* store r3 from boot loader */
	str	r5, [r0, #20]		/* store the physical address */
	adr	r4, Lpagetable		/* load the pagetable address */
	ldr	r5, [r4, #4]
	str	r5, [r0, #24]		/* store the pagetable address */
	mov	fp, #0			/* trace back starts here */
	bl	_C_LABEL(initarm)	/* Off we go */

	/* initarm will return the new stack pointer. */
	mov	sp, r0

	bl	_C_LABEL(mi_startup)	/* call mi_startup()! */

	adr	r0, .Lmainreturned
	b	_C_LABEL(panic)
	/* NOTREACHED */
END(_start)

#define	VA_TO_PA_POINTER(name, table)	 \
name:					;\
	.word	.			;\
	.word	table

/*
 * Returns the physical address of a magic va to pa pointer.
 * r0     - The pagetable data pointer. This must be built using the
 *          VA_TO_PA_POINTER macro.
 *          e.g.
 *              VA_TO_PA_POINTER(Lpagetable, pagetable)
 *              ...
 *              adr  r0, Lpagetable
 *              bl   translate_va_to_pa
 *              r0 will now contain the physical address of pagetable
 * r1, r2 - Trashed
 */
translate_va_to_pa:
	ldr	r1, [r0]
	sub	r2, r1, r0
	/* At this point: r2 = VA - PA */

	/*
	 * Find the physical address of the table. After these two
	 * instructions:
	 * r1 = va(pagetable)
	 *
	 * r0 = va(pagetable) - (VA - PA)
	 *    = va(pagetable) - VA + PA
	 *    = pa(pagetable)
	 */
	ldr	r1, [r0, #4]
	sub	r0, r1, r2
	RET

/*
 * Builds the page table
 * r0 - The table base address
 * r1 - The physical address (trashed)
 * r2 - The virtual address (trashed)
 * r3 - The number of 1MiB sections
 * r4 - Trashed
 *
 * Addresses must be 1MiB aligned
 */
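/*
 * Worked example (hypothetical addresses): to map VA 0xC0000000 to
 * PA 0x10000000, the L1 index is 0xC0000000 >> 20 = 0xC00, so the
 * "lsr r2, #(L1_S_SHIFT - 2)" below yields the byte offset
 * 0xC00 * 4 = 0x3000, and the descriptor stored there is 0x10000000
 * ORed with the section attributes built in r4.  Each loop iteration
 * advances the offset by 4 and the physical address by L1_S_SIZE (1MiB).
 */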
build_pagetables:
	/* Set the required page attributes */
	ldr	r4, =(L1_TYPE_S|L1_S_C|L1_S_AP(AP_KRW))
#if defined(SMP)
	orr	r4, #(L1_SHARED)
#endif
	orr	r1, r4

	/* Convert the virtual address into an offset into the L1 table */
	lsr	r2, #(L1_S_SHIFT - 2)

	mov	r4, r3
1:
	str	r1, [r0, r2]
	add	r2, r2, #4
	add	r1, r1, #(L1_S_SIZE)
	adds	r4, r4, #-1
	bhi	1b

	RET

VA_TO_PA_POINTER(Lpagetable, pagetable)

Lreal_start:
	.word	_start
Lend:
	.word	_edata

.Lstart:
	.word	_edata
	.word	_ebss
	.word	svcstk + INIT_ARM_STACK_SIZE

.Lvirt_done:
	.word	virt_done

.Lmainreturned:
	.asciz	"main() returned"
	.align	2

	.bss
svcstk:
	.space	INIT_ARM_STACK_SIZE

/*
 * Memory for the initial pagetable. We are unable to place this in
 * the bss because the bss is cleared only after this table is already
 * in use.
 */
.section ".init_pagetable"
	.align	14		/* 16KiB aligned */
pagetable:
	.space	L1_TABLE_SIZE

	.text
	.align	2

.Lcpufuncs:
	.word	_C_LABEL(cpufuncs)

#if defined(SMP)

.Lmpvirt_done:
	.word	mpvirt_done
VA_TO_PA_POINTER(Lstartup_pagetable_secondary, temp_pagetable)

ASENTRY_NP(mpentry)

	/* Make sure interrupts are disabled. */
	mrs	r7, cpsr
	orr	r7, r7, #(PSR_I | PSR_F)
	msr	cpsr_c, r7

	/* Disable MMU. It should be disabled already, but make sure. */
	mrc	p15, 0, r2, c1, c0, 0
	bic	r2, r2, #(CPU_CONTROL_MMU_ENABLE | CPU_CONTROL_DC_ENABLE |\
	    CPU_CONTROL_WBUF_ENABLE)
	bic	r2, r2, #(CPU_CONTROL_IC_ENABLE)
	bic	r2, r2, #(CPU_CONTROL_BPRD_ENABLE)
	mcr	p15, 0, r2, c1, c0, 0
	nop
	nop
	nop
	CPWAIT(r0)

#if ARM_MMU_V6
	bl	armv6_idcache_inv_all	/* Modifies r0 only */
#elif ARM_MMU_V7
	bl	armv7_idcache_inv_all	/* Modifies r0-r3, ip */
#endif

	/* Load the page table physical address */
	adr	r0, Lstartup_pagetable_secondary
	bl	translate_va_to_pa
	/* Load the address of the secondary page table */
	ldr	r0, [r0]

	orr	r0, r0, #2		/* Set TTB shared memory flag */
	mcr	p15, 0, r0, c2, c0, 0	/* Set TTB */
	mcr	p15, 0, r0, c8, c7, 0	/* Flush TLB */

	mov	r0, #0
	mcr	p15, 0, r0, c13, c0, 1	/* Set ASID to 0 */

	/* Set the Domain Access register. Very important! */
	mov	r0, #((DOMAIN_CLIENT << (PMAP_DOMAIN_KERNEL*2)) | DOMAIN_CLIENT)
	mcr	p15, 0, r0, c3, c0, 0
	/* Enable MMU */
	mrc	p15, 0, r0, c1, c0, 0
	orr	r0, r0, #CPU_CONTROL_V6_EXTPAGE
	orr	r0, r0, #CPU_CONTROL_AF_ENABLE
	orr	r0, r0, #(CPU_CONTROL_MMU_ENABLE | CPU_CONTROL_DC_ENABLE |\
	    CPU_CONTROL_WBUF_ENABLE)
	orr	r0, r0, #(CPU_CONTROL_IC_ENABLE)
	orr	r0, r0, #(CPU_CONTROL_BPRD_ENABLE)
	mcr	p15, 0, r0, c1, c0, 0
	nop
	nop
	nop
	CPWAIT(r0)

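	/*
	 * Give each secondary CPU its own 2048-byte region below the
	 * initial boot stack: read the CPU ID from the low bits of the
	 * MPIDR (CP15 c0, c0, 5) and step the stack pointer down by
	 * 2048 * cpuid.
	 */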
	adr	r1, .Lstart
	ldmia	r1, {r1, r2, sp}	/* Set initial stack and */
	mrc	p15, 0, r0, c0, c0, 5
	and	r0, r0, #15
	mov	r1, #2048
	mul	r2, r1, r0
	sub	sp, sp, r2
	str	r1, [sp]
	ldr	pc, .Lmpvirt_done

mpvirt_done:

	mov	fp, #0			/* trace back starts here */
	bl	_C_LABEL(init_secondary)	/* Off we go */

	adr	r0, .Lmpreturned
	b	_C_LABEL(panic)
	/* NOTREACHED */

.Lmpreturned:
	.asciz	"init_secondary() returned"
	.align	2
END(mpentry)
#endif

ENTRY_NP(cpu_halt)
	mrs	r2, cpsr
	bic	r2, r2, #(PSR_MODE)
	orr	r2, r2, #(PSR_SVC32_MODE)
	orr	r2, r2, #(PSR_I | PSR_F)
	msr	cpsr_fsxc, r2

	ldr	r4, .Lcpu_reset_address
	ldr	r4, [r4]

	ldr	r0, .Lcpufuncs
	mov	lr, pc
	ldr	pc, [r0, #CF_IDCACHE_WBINV_ALL]
	mov	lr, pc
	ldr	pc, [r0, #CF_L2CACHE_WBINV_ALL]

	/*
	 * Load the cpu_reset_needs_v4_MMU_disable flag to determine
	 * whether the MMU-disable step below is necessary.
	 */

	ldr	r1, .Lcpu_reset_needs_v4_MMU_disable
	ldr	r1, [r1]
	cmp	r1, #0
	mov	r2, #0

	/*
	 * MMU & IDC off, 32 bit program & data space
	 * Hurl ourselves into the ROM
	 */
	mov	r0, #(CPU_CONTROL_32BP_ENABLE | CPU_CONTROL_32BD_ENABLE)
	mcr	p15, 0, r0, c1, c0, 0
	mcrne	p15, 0, r2, c8, c7, 0	/* nail I+D TLB on ARMv4 and greater */
	mov	pc, r4

	/*
	 * _cpu_reset_address contains the address to branch to, to complete
	 * the cpu reset after turning the MMU off.
	 * This variable is provided by the hardware-specific code.
	 */
.Lcpu_reset_address:
	.word	_C_LABEL(cpu_reset_address)

	/*
	 * cpu_reset_needs_v4_MMU_disable contains a flag that signals if the
	 * v4 MMU disable instruction needs executing... it is an illegal
	 * instruction on, e.g., ARM6/7 and locks up the computer in an
	 * endless illegal instruction / data-abort / reset loop.
	 */
.Lcpu_reset_needs_v4_MMU_disable:
	.word	_C_LABEL(cpu_reset_needs_v4_MMU_disable)
END(cpu_halt)


/*
 * setjmp + longjmp
 */
ENTRY(setjmp)
	stmia	r0, {r4-r14}
	mov	r0, #0x00000000
	RET
END(setjmp)

ENTRY(longjmp)
	ldmia	r0, {r4-r14}
	mov	r0, #0x00000001
	RET
END(longjmp)

	.data
	.global	_C_LABEL(esym)
_C_LABEL(esym):	.word	_C_LABEL(end)

ENTRY_NP(abort)
	b	_C_LABEL(abort)
END(abort)

ENTRY_NP(sigcode)
	mov	r0, sp
	add	r0, r0, #SIGF_UC

	/*
	 * Call the sigreturn system call.
	 *
	 * We have to load r7 manually rather than using
	 * "ldr r7, =SYS_sigreturn" to ensure the value of szsigcode is
	 * correct. Using the alternative places esigcode at the address
	 * of the data rather than the address one past the data.
	 */

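	/*
	 * The PC-relative loads below rely on the ARM convention that a
	 * read of the PC returns the current instruction's address plus 8,
	 * so the [pc, #12] load picks up the SYS_sigreturn word and the
	 * [pc, #8] load picks up the SYS_exit word stored just past
	 * END(sigcode).
	 */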
	ldr	r7, [pc, #12]	/* Load SYS_sigreturn */
	swi	SYS_sigreturn

	/* Well, if that failed we had better exit quickly! */

	ldr	r7, [pc, #8]	/* Load SYS_exit */
	swi	SYS_exit

	/* Branch back to retry SYS_sigreturn */
	b	. - 16
END(sigcode)
	.word	SYS_sigreturn
	.word	SYS_exit

	.align	2
	.global	_C_LABEL(esigcode)
_C_LABEL(esigcode):

	.data
	.global	szsigcode
szsigcode:
	.long	esigcode-sigcode

/* End of locore.S */