FreeBSD/Linux Kernel Cross Reference
sys/arm/arm/locore.S
1 /* $NetBSD: locore.S,v 1.14 2003/04/20 16:21:40 thorpej Exp $ */
2
3 /*-
4 * Copyright (C) 1994-1997 Mark Brinicombe
5 * Copyright (C) 1994 Brini
6 * All rights reserved.
7 *
8 * Redistribution and use in source and binary forms, with or without
9 * modification, are permitted provided that the following conditions
10 * are met:
11 * 1. Redistributions of source code must retain the above copyright
12 * notice, this list of conditions and the following disclaimer.
13 * 2. Redistributions in binary form must reproduce the above copyright
14 * notice, this list of conditions and the following disclaimer in the
15 * documentation and/or other materials provided with the distribution.
16 * 3. All advertising materials mentioning features or use of this software
17 * must display the following acknowledgement:
18 * This product includes software developed by Brini.
19 * 4. The name of Brini may not be used to endorse or promote products
20 * derived from this software without specific prior written permission.
21 *
22 * THIS SOFTWARE IS PROVIDED BY BRINI ``AS IS'' AND ANY EXPRESS OR
23 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
24 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
25 * IN NO EVENT SHALL BRINI BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
26 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
27 * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS;
28 * OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,
29 * WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR
30 * OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF
31 * ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
32 *
33 */
34
35 #include "assym.s"
36 #include <sys/syscall.h>
37 #include <machine/asm.h>
38 #include <machine/armreg.h>
39 #include <machine/pte.h>
40 __FBSDID("$FreeBSD: releng/6.0/sys/arm/arm/locore.S 143681 2005-03-16 07:53:02Z jmg $");
41
42 /* What size should this really be ? It is only used by init_arm() */
43 #define INIT_ARM_STACK_SIZE 2048
44
45 /*
46 * This is for kvm_mkdb, and should be the address of the beginning
47 * of the kernel text segment (not necessarily the same as kernbase).
48 */
49
50
/*
 * CPWAIT: wait for a CP15 operation to drain before relying on its
 * effect — an arbitrary CP15 read, a use of the result, then a branch.
 * NOTE(review): this is the XScale CPWAIT idiom; confirm it is
 * required/harmless on every CPU this file targets.
 */
51 #define CPWAIT_BRANCH \
52 sub pc, pc, #4
53 
54 #define CPWAIT(tmp) \
55 mrc p15, 0, tmp, c2, c0, 0 /* arbitrary read of CP15 */ ;\
56 mov tmp, tmp /* wait for it to complete */ ;\
57 CPWAIT_BRANCH /* branch to next insn */
58
59 .text
60 .align 0
61 .globl kernbase
62 .set kernbase,KERNBASE
63
/* btext: start-of-kernel-text marker (see kvm_mkdb comment above). */
64 ENTRY_NP(btext)
65 
/*
 * _start: kernel entry point from the boot loader.
 * Optionally relocates the image to RAM, builds a minimal L1 page
 * table, turns the MMU on, zeroes the bss, switches execution to
 * virtual addresses, then calls initarm() and mi_startup().
 */
66 ASENTRY_NP(_start)
67 
68 
69 /* Check if we are running on RAM, if not move ourself to RAM */
70 #if 0
71 cmp pc, #PHYSADDR
72 bhi start_inram /* XXX: This is wrong */
73 #endif
74 b start_inram /*
75 * XXX: this is even more wrong, but RedBoot
76 * use 0x00000000-0x100000000 as virtual
77 * addresses for the RAM.
78 */
79 
/*
 * Relocation copy (dead code while the unconditional branch above is
 * in place): copy _edata - _start bytes, rounded up to whole words,
 * from the current location to PHYSADDR + 2MB.  r4 keeps the
 * destination base for the jump that follows the loop.
 */
80 /* move me to RAM
81 * XXX: we can use memcpy if it is PIC
82 */
83 ldr r1, Lcopy_size
84 adr r0, _C_LABEL(_start)
85 add r1, r1, #3
86 mov r1, r1, LSR #2 /* byte count -> word count, rounded up */
87 mov r2, #PHYSADDR
88 add r2, r2, #0x00200000 /* destination = PHYSADDR + 2MB */
89 mov r4, r2
90 
/* Word-at-a-time copy; bhi falls out once the count hits zero. */
91 5: ldr r3,[r0],#4
92 str r3,[r2],#4
93 subs r1,r1,#1
94 bhi 5b
95 
96 /* Jump to RAM */
/* Branch to start_inram at its copied (physical) location. */
97 ldr r0, Lstart_off
98 add pc, r4, r0
99 
100 Lcopy_size: .word _edata-_C_LABEL(_start)
101 Lstart_off: .word start_inram-_C_LABEL(_start)
102 start_inram:
/*
 * Form the physical address of Lunmapped by replacing the top byte of
 * its current (possibly virtual) address with PHYSADDR's top byte.
 */
103 adr r7, Lunmapped
104 bic r7, r7, #0xff000000
105 orr r7, r7, #PHYSADDR
106 
107 /* Disable MMU for a while */
108 mrc p15, 0, r2, c1, c0, 0
109 bic r2, r2, #CPU_CONTROL_MMU_ENABLE
110 mcr p15, 0, r2, c1, c0, 0
111 
/* NOTE(review): the nops presumably let the control-register write
 * take effect before the jump below — confirm per-CPU requirement. */
112 nop
113 nop
114 nop
115 mov pc, r7 /* continue at Lunmapped: MMU off, physical addresses */
116 Lunmapped:
117 
118 #ifdef STARTUP_PAGETABLE_ADDR
119 /* build page table from scratch */
/*
 * r0 = L1 table base.  Each mmu_init_table entry supplies:
 *   r1 = number of 1MB sections,
 *   r2 = byte offset into the L1 table (derived from the VA),
 *   r3 = PA merged with section attributes.
 * The loop at 2: stores one section descriptor per iteration,
 * advancing the offset by 4 and the PA by one section size.
 */
120 ldr r0, Lstartup_pagetable
121 adr r4, mmu_init_table
122 b 3f
123 
124 2:
125 str r3, [r0, r2]
126 add r2, r2, #4
127 add r3, r3, #(L1_S_SIZE)
128 adds r1, r1, #-1
129 bhi 2b
130 3:
131 ldmia r4!, {r1,r2,r3} /* # of sections, table offset (from VA), PA|attr */
132 cmp r1, #0
/* Non-zero count: compute label 2:'s physical address (same top-byte
 * substitution as above) and run the fill loop; zero ends the table. */
133 adrne r5, 2b
134 bicne r5, r5, #0xff000000
135 orrne r5, r5, #PHYSADDR
136 movne pc, r5
137 
138 mcr p15, 0, r0, c2, c0, 0 /* Set TTB */
139 mcr p15, 0, r0, c8, c7, 0 /* Flush TLB */
140 
141 /* Set the Domain Access register. Very important! */
142 mov r0, #((DOMAIN_CLIENT << (PMAP_DOMAIN_KERNEL*2)) | DOMAIN_CLIENT)
143 mcr p15, 0, r0, c3, c0, 0
144 
145 /* Enable MMU */
146 mrc p15, 0, r0, c1, c0, 0
147 orr r0, r0, #CPU_CONTROL_MMU_ENABLE
148 mcr p15, 0, r0, c1, c0, 0
149 CPWAIT(r0)
150 
/* NOTE(review): this bl targets the very next line, so its only effect
 * is loading lr; execution falls through — confirm it is deliberate. */
151 bl mmu_done
152 mmu_done:
153 #endif
/*
 * .Lstart holds { _edata, _end, initial sp }; after the ldmia:
 * r1 = _edata (bss start), r2 = _end, sp = top of svcstk.
 */
154 adr r1, .Lstart
155 ldmia r1, {r1, r2, sp} /* Set initial stack and */
156 sub r2, r2, r1 /* get zero init data */
157 mov r3, #0
158 
159 .L1:
160 str r3, [r1], #0x0004 /* Zero the bss */
161 subs r2, r2, #4
162 bgt .L1
163 
/*
 * If the pc is still on the "wrong" side (physical vs. virtual),
 * apply the KERNVIRTADDR - KERNPHYSADDR delta to pc so execution
 * continues at the mapped kernel address.
 * NOTE(review): reading pc yields ".+8" on ARM, so the add/sub lands
 * 4 bytes past virt_done, skipping the ldr fp — verify intended.
 */
164 ldr r4, =KERNVIRTADDR
165 cmp pc, r4
166 #if KERNVIRTADDR > KERNPHYSADDR
167 bgt virt_done
168 ldr r4, =KERNVIRTADDR
169 ldr r5, =KERNPHYSADDR
170 sub r4, r4, r5
171 add pc, pc, r4
172 #else
173 blt virt_done
174 ldr r4, =KERNPHYSADDR
175 ldr r5, =KERNVIRTADDR
176 sub r4, r4, r5
177 sub pc, pc, r4
178 #endif
179 virt_done:
180 ldr fp, =KERNVIRTADDR /* trace back starts here */
181 bl _C_LABEL(initarm) /* Off we go */
182 
183 /* init arm will return the new stack pointer. */
184 mov sp, r0
185 
186 bl _C_LABEL(mi_startup) /* call mi_startup()! */
187 
/* mi_startup() must not return; panic with ".Lmainreturned" if it does. */
188 adr r0, .Lmainreturned
189 b _C_LABEL(panic)
190 /* NOTREACHED */
191 #ifdef STARTUP_PAGETABLE_ADDR
/*
 * MMU_INIT emits one mmu_init_table entry, in exactly the order the
 * startup loop reads it: section count, byte offset into the L1 table
 * for the VA (4 bytes per 1MB section descriptor), then the PA merged
 * with the attribute bits.
 */
192 #define MMU_INIT(va,pa,n_sec,attr) \
193 .word n_sec ; \
194 .word 4*((va)>>L1_S_SHIFT) ; \
195 .word (pa)|(attr) ;
196 
/* Physical address of the scratch L1 translation table. */
197 Lstartup_pagetable:
198 .word STARTUP_PAGETABLE_ADDR
/* Table of section mappings, terminated by a zero word. */
199 mmu_init_table:
200 /* fill all table VA==PA */
201 /* map SDRAM VA==PA, WT cacheable */
202 MMU_INIT(PHYSADDR, PHYSADDR , 64, L1_TYPE_S|L1_S_C|L1_S_AP(AP_KRW))
203 /* map VA 0xc0000000..0xc3ffffff to PA */
204 MMU_INIT(KERNBASE, PHYSADDR, 64, L1_TYPE_S|L1_S_C|L1_S_AP(AP_KRW))
205 
206 .word 0 /* end of table */
207 #endif
/*
 * Parameter block consumed by the startup code: bss start (_edata),
 * bss end (_end), and the initial SVC-mode stack pointer.
 */
208 .Lstart:
209 .word _edata
210 .word _end
211 .word svcstk + INIT_ARM_STACK_SIZE /* stack grows down from the top */
212 
/* Panic string used when mi_startup() unexpectedly returns. */
213 .Lmainreturned:
214 .asciz "main() returned"
215 .align 0
216 
217 .bss
/* Initial kernel stack, used until initarm() returns a real one. */
218 svcstk:
219 .space INIT_ARM_STACK_SIZE
220
221 .text
222 .align 0
223
224 #ifndef OFW
225 /* OFW based systems will use OF_boot() */
226 
/* Literal-pool pointer to the cpufuncs dispatch table. */
227 .Lcpufuncs:
228 .word _C_LABEL(cpufuncs)
229 
/*
 * cpu_halt: reset the machine.
 * Forces SVC32 mode with IRQs/FIQs masked, writes back and invalidates
 * the I/D caches through cpufuncs, disables the MMU (and on >= v4
 * parts flushes the TLBs), then jumps to the board-supplied
 * cpu_reset_address.  Does not return.
 */
230 ENTRY_NP(cpu_halt)
231 mrs r2, cpsr
232 bic r2, r2, #(PSR_MODE)
233 orr r2, r2, #(PSR_SVC32_MODE)
234 orr r2, r2, #(I32_bit | F32_bit) /* mask IRQ and FIQ */
235 msr cpsr_all, r2
236 
237 ldr r4, .Lcpu_reset_address
238 ldr r4, [r4] /* r4 = address to jump to for the actual reset */
239 
/* Old-style indirect call: mov lr,pc then load pc from the table. */
240 ldr r0, .Lcpufuncs
241 mov lr, pc
242 ldr pc, [r0, #CF_IDCACHE_WBINV_ALL]
243 
244 /*
245 * Load the cpu_reset_needs_v4_MMU_disable flag to determine if it's
246 * necessary.
247 */
248 
249 ldr r1, .Lcpu_reset_needs_v4_MMU_disable
250 ldr r1, [r1]
/* The condition flags set here are consumed by the mcrne below; the
 * intervening mov/mcr instructions do not modify them. */
251 cmp r1, #0
252 mov r2, #0
253 
254 /*
255 * MMU & IDC off, 32 bit program & data space
256 * Hurl ourselves into the ROM
257 */
258 mov r0, #(CPU_CONTROL_32BP_ENABLE | CPU_CONTROL_32BD_ENABLE)
259 mcr 15, 0, r0, c1, c0, 0
260 mcrne 15, 0, r2, c8, c7, 0 /* nail I+D TLB on ARMv4 and greater */
261 mov pc, r4
262 
263 /*
264 * _cpu_reset_address contains the address to branch to, to complete
265 * the cpu reset after turning the MMU off
266 * This variable is provided by the hardware specific code
267 */
268 .Lcpu_reset_address:
269 .word _C_LABEL(cpu_reset_address)
270 
271 /*
272 * cpu_reset_needs_v4_MMU_disable contains a flag that signals if the
273 * v4 MMU disable instruction needs executing... it is an illegal instruction
274 * on f.e. ARM6/7 that locks up the computer in an endless illegal
275 * instruction / data-abort / reset loop.
276 */
277 .Lcpu_reset_needs_v4_MMU_disable:
278 .word _C_LABEL(cpu_reset_needs_v4_MMU_disable)
279 
280 #endif /* OFW */
281
282 #ifdef IPKDB
283 /*
284 * Execute(inst, psr, args, sp)
285 *
286 * Execute INSTruction with PSR and ARGS[0] - ARGS[3] making
287 * available stack at SP for next undefined instruction trap.
288 *
289 * Move the instruction onto the stack and jump to it.
290 */
291 ENTRY_NP(Execute)
292 mov ip, sp
293 stmfd sp!, {r2, r4-r7, fp, ip, lr, pc} /* APCS frame + saved args ptr (r2) */
294 sub fp, ip, #4
295 mov ip, r3 /* ip = caller-supplied trap stack */
296 ldr r7, .Lreturn /* r7 = the "mov pc, r7" instruction word */
/* Push inst followed by that "mov pc, r7": executing the pair runs
 * inst and then transfers to .LExec (r7 is repointed there below). */
297 stmfd sp!, {r0, r7}
298 adr r7, #.LExec
299 mov r5, r1 /* r5 = requested PSR */
300 mrs r4, cpsr /* r4 = current PSR, restored afterwards */
301 ldmia r2, {r0-r3} /* load ARGS[0..3] into r0-r3 */
302 mov r6, sp /* r6 = address of the pushed code pair */
303 mov sp, ip
304 msr cpsr_all, r5
305 mov pc, r6 /* execute the two words pushed above */
306 .LExec:
307 mrs r5, cpsr /* capture the PSR produced by inst */
308 /* XXX Cannot switch thus easily back from user mode */
309 msr cpsr_all, r4
310 add sp, r6, #8 /* discard the pushed code pair */
311 ldmfd sp!, {r6} /* r6 = saved args pointer (the r2 from the prologue) */
312 stmia r6, {r0-r3} /* write back ARGS[0..3] */
313 mov r0, r5 /* return the post-execution PSR */
314 ldmdb fp, {r4-r7, fp, sp, pc} /* APCS epilogue: restore and return */
315 .Lreturn:
316 mov pc, r7
317 #endif
318
319 /*
320 * setjump + longjmp
321 */
/*
 * setjmp(jmp_buf): save r4-r12, sp (r13) and lr (r14) into the buffer
 * at r0, then return 0 on this direct path.  (r0 must be the store
 * base before it is overwritten with the return value.)
 */
322 ENTRY(setjmp)
323 stmia r0, {r4-r14}
324 mov r0, #0x00000000
325 RET
326
/*
 * longjmp(jmp_buf): restore r4-r12, sp and lr saved by setjmp from the
 * buffer at r0 and return 1, resuming at the matching setjmp caller
 * (via the restored lr).
 */
327 ENTRY(longjmp)
328 ldmia r0, {r4-r14}
329 mov r0, #0x00000001
330 RET
331
332 .data
333 .global _C_LABEL(esym)
/* esym: initialized to the kernel's end address.  NOTE(review):
 * presumably updated to point past any loaded symbol table — confirm
 * against the machine-dependent startup code. */
334 _C_LABEL(esym): .word _C_LABEL(end)
335
/* abort: hang forever by branching to its own entry label. */
336 ENTRY_NP(abort)
337 b _C_LABEL(abort)
338
/*
 * Signal trampoline.  NOTE(review): presumably copied out to user
 * processes for signal return — confirm against the MD sendsig code.
 * Passes the current sp in r0 to SYS_sigreturn; if that fails it
 * issues SYS_exit, and the final branch loops back to the exit swi
 * so control can never run past the trampoline.
 */
339 ENTRY_NP(sigcode)
340 mov r0, sp
341 swi SYS_sigreturn
342 
343 /* Well if that failed we better exit quick ! */
344 
345 swi SYS_exit
346 b . - 8 /* branch back to the swi SYS_exit above */
347 
348 .align 0
349 .global _C_LABEL(esigcode)
/* esigcode marks the end of the trampoline text. */
350 _C_LABEL(esigcode):
351 
352 .data
353 .global szsigcode
/* szsigcode: byte length of the trampoline (esigcode - sigcode). */
354 szsigcode:
355 .long esigcode-sigcode
356 /* End of locore.S */
Cache object: ba7d6b3f609573c86558334792539769
|