1 /* $NetBSD: cpufunc_asm_arm8.S,v 1.2 2001/11/11 00:47:49 thorpej Exp $ */
2
3 /*-
4 * Copyright (c) 1997 ARM Limited
5 * Copyright (c) 1997 Causality Limited
6 * All rights reserved.
7 *
8 * Redistribution and use in source and binary forms, with or without
9 * modification, are permitted provided that the following conditions
10 * are met:
11 * 1. Redistributions of source code must retain the above copyright
12 * notice, this list of conditions and the following disclaimer.
13 * 2. Redistributions in binary form must reproduce the above copyright
14 * notice, this list of conditions and the following disclaimer in the
15 * documentation and/or other materials provided with the distribution.
16 * 3. All advertising materials mentioning features or use of this software
17 * must display the following acknowledgement:
18 * This product includes software developed by Causality Limited.
19 * 4. The name of Causality Limited may not be used to endorse or promote
20 * products derived from this software without specific prior written
21 * permission.
22 *
23 * THIS SOFTWARE IS PROVIDED BY CAUSALITY LIMITED ``AS IS'' AND ANY EXPRESS
24 * OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
25 * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
26 * DISCLAIMED. IN NO EVENT SHALL CAUSALITY LIMITED BE LIABLE FOR ANY DIRECT,
27 * INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
28 * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
29 * SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
30 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
31 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
32 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
33 * SUCH DAMAGE.
34 *
35 * ARM8 assembly functions for CPU / MMU / TLB specific operations
36 *
37 */
38
39 #include <machine/asm.h>
40 __FBSDID("$FreeBSD: releng/7.3/sys/arm/arm/cpufunc_asm_arm8.S 139735 2005-01-05 21:58:49Z imp $");
41
42 ENTRY(arm8_clock_config)
43 mrc p15, 0, r3, c15, c0, 0 /* Read the clock register */
44 bic r2, r3, #0x11 /* turn off dynamic clocking
45 and clear L bit */
46 mcr p15, 0, r2, c15, c0, 0 /* Write clock register */
47
48 bic r2, r3, r0 /* Clear bits */
49 eor r2, r2, r1 /* XOR bits */
50 bic r2, r2, #0x10 /* clear the L bit */
51
52 bic r1, r2, #0x01 /* still keep dynamic clocking off */
53 mcr p15, 0, r1, c15, c0, 0 /* Write clock register */
54 mov r0, r0 /* NOP */
55 mov r0, r0 /* NOP */
56 mov r0, r0 /* NOP */
57 mov r0, r0 /* NOP */
58 mcr p15, 0, r2, c15, c0, 0 /* Write clock register */
59 mov r0, r3 /* Return old value */
60 RET
61
62 /*
63 * Functions to set the MMU Translation Table Base register
64 *
65 * We need to clean and flush the cache as it uses virtual
66 * addresses that are about to change.
67 */
68 ENTRY(arm8_setttb)
69 mrs r3, cpsr_all
70 orr r1, r3, #(I32_bit | F32_bit)
71 msr cpsr_all, r1
72
73 stmfd sp!, {r0-r3, lr}
74 bl _C_LABEL(arm8_cache_cleanID)
75 ldmfd sp!, {r0-r3, lr}
76 mcr p15, 0, r0, c7, c7, 0 /* flush I+D cache */
77
78 /* Write the TTB */
79 mcr p15, 0, r0, c2, c0, 0
80
81 /* If we have updated the TTB we must flush the TLB */
82 mcr p15, 0, r0, c8, c7, 0
83
84 /* For good measure we will flush the IDC as well */
85 mcr p15, 0, r0, c7, c7, 0
86
87 /* Make sure that pipeline is emptied */
88 mov r0, r0
89 mov r0, r0
90 msr cpsr_all, r3
91
92 RET
93
94 /*
95 * TLB functions
96 */
97 ENTRY(arm8_tlb_flushID)
98 mcr p15, 0, r0, c8, c7, 0 /* flush I+D tlb */
99 RET
100
101 ENTRY(arm8_tlb_flushID_SE)
102 mcr p15, 0, r0, c8, c7, 1 /* flush I+D tlb single entry */
103 RET
104
105 /*
106 * Cache functions
107 */
108 ENTRY(arm8_cache_flushID)
109 mcr p15, 0, r0, c7, c7, 0 /* flush I+D cache */
110 RET
111
112 ENTRY(arm8_cache_flushID_E)
113 mcr p15, 0, r0, c7, c7, 1 /* flush I+D single entry */
114 RET
115
116 ENTRY(arm8_cache_cleanID)
117 mov r0, #0x00000000
118
119 1: mov r2, r0
120 mcr p15, 0, r2, c7, c11, 1
121 add r2, r2, #0x10
122 mcr p15, 0, r2, c7, c11, 1
123 add r2, r2, #0x10
124 mcr p15, 0, r2, c7, c11, 1
125 add r2, r2, #0x10
126 mcr p15, 0, r2, c7, c11, 1
127 add r2, r2, #0x10
128 mcr p15, 0, r2, c7, c11, 1
129 add r2, r2, #0x10
130 mcr p15, 0, r2, c7, c11, 1
131 add r2, r2, #0x10
132 mcr p15, 0, r2, c7, c11, 1
133 add r2, r2, #0x10
134 mcr p15, 0, r2, c7, c11, 1
135 add r2, r2, #0x10
136 mcr p15, 0, r2, c7, c11, 1
137 add r2, r2, #0x10
138 mcr p15, 0, r2, c7, c11, 1
139 add r2, r2, #0x10
140 mcr p15, 0, r2, c7, c11, 1
141 add r2, r2, #0x10
142 mcr p15, 0, r2, c7, c11, 1
143 add r2, r2, #0x10
144 mcr p15, 0, r2, c7, c11, 1
145 add r2, r2, #0x10
146 mcr p15, 0, r2, c7, c11, 1
147 add r2, r2, #0x10
148 mcr p15, 0, r2, c7, c11, 1
149 add r2, r2, #0x10
150 mcr p15, 0, r2, c7, c11, 1
151
152 adds r0, r0, #0x04000000
153 bne 1b
154
155 RET
156
157 ENTRY(arm8_cache_cleanID_E)
158 mcr p15, 0, r0, c7, c11, 1 /* clean I+D single entry */
159 RET
160
161 ENTRY(arm8_cache_purgeID)
162 /*
163 * ARM810 bug 3
164 *
165 * Clean and invalidate entry will not invalidate the entry
166 * if the line was already clean. (mcr p15, 0, rd, c7, 15, 1)
167 *
168 * Instead of using the clean and invalidate entry operation
169 * use a separate clean and invalidate entry operations.
170 * i.e.
171 * mcr p15, 0, rd, c7, c11, 1
172 * mcr p15, 0, rd, c7, c7, 1
173 */
174
175 mov r0, #0x00000000
176
177 mrs r3, cpsr_all
178 orr r2, r3, #(I32_bit | F32_bit)
179 msr cpsr_all, r2
180
181 1: mov r2, r0
182 mcr p15, 0, r2, c7, c11, 1
183 mcr p15, 0, r2, c7, c7, 1
184 add r2, r2, #0x10
185 mcr p15, 0, r2, c7, c11, 1
186 mcr p15, 0, r2, c7, c7, 1
187 add r2, r2, #0x10
188 mcr p15, 0, r2, c7, c11, 1
189 mcr p15, 0, r2, c7, c7, 1
190 add r2, r2, #0x10
191 mcr p15, 0, r2, c7, c11, 1
192 mcr p15, 0, r2, c7, c7, 1
193 add r2, r2, #0x10
194 mcr p15, 0, r2, c7, c11, 1
195 mcr p15, 0, r2, c7, c7, 1
196 add r2, r2, #0x10
197 mcr p15, 0, r2, c7, c11, 1
198 mcr p15, 0, r2, c7, c7, 1
199 add r2, r2, #0x10
200 mcr p15, 0, r2, c7, c11, 1
201 mcr p15, 0, r2, c7, c7, 1
202 add r2, r2, #0x10
203 mcr p15, 0, r2, c7, c11, 1
204 mcr p15, 0, r2, c7, c7, 1
205 add r2, r2, #0x10
206 mcr p15, 0, r2, c7, c11, 1
207 mcr p15, 0, r2, c7, c7, 1
208 add r2, r2, #0x10
209 mcr p15, 0, r2, c7, c11, 1
210 mcr p15, 0, r2, c7, c7, 1
211 add r2, r2, #0x10
212 mcr p15, 0, r2, c7, c11, 1
213 mcr p15, 0, r2, c7, c7, 1
214 add r2, r2, #0x10
215 mcr p15, 0, r2, c7, c11, 1
216 mcr p15, 0, r2, c7, c7, 1
217 add r2, r2, #0x10
218 mcr p15, 0, r2, c7, c11, 1
219 mcr p15, 0, r2, c7, c7, 1
220 add r2, r2, #0x10
221 mcr p15, 0, r2, c7, c11, 1
222 mcr p15, 0, r2, c7, c7, 1
223 add r2, r2, #0x10
224 mcr p15, 0, r2, c7, c11, 1
225 mcr p15, 0, r2, c7, c7, 1
226 add r2, r2, #0x10
227 mcr p15, 0, r2, c7, c11, 1
228 mcr p15, 0, r2, c7, c7, 1
229
230 adds r0, r0, #0x04000000
231 bne 1b
232
233 msr cpsr_all, r3
234 RET
235
236 ENTRY(arm8_cache_purgeID_E)
237 /*
238 * ARM810 bug 3
239 *
240 * Clean and invalidate entry will not invalidate the entry
241 * if the line was already clean. (mcr p15, 0, rd, c7, 15, 1)
242 *
243 * Instead of using the clean and invalidate entry operation
244 * use a separate clean and invalidate entry operations.
245 * i.e.
246 * mcr p15, 0, rd, c7, c11, 1
247 * mcr p15, 0, rd, c7, c7, 1
248 */
249 mrs r3, cpsr_all
250 orr r2, r3, #(I32_bit | F32_bit)
251 msr cpsr_all, r2
252 mcr p15, 0, r0, c7, c11, 1 /* clean I+D single entry */
253 mcr p15, 0, r0, c7, c7, 1 /* flush I+D single entry */
254 msr cpsr_all, r3
255 RET
256
257 /*
258 * Context switch.
259 *
260 * These is the CPU-specific parts of the context switcher cpu_switch()
261 * These functions actually perform the TTB reload.
262 *
263 * NOTE: Special calling convention
264 * r1, r4-r13 must be preserved
265 */
266 ENTRY(arm8_context_switch)
267 /* For good measure we will flush the IDC as well */
268 mcr p15, 0, r0, c7, c7, 0 /* flush I+D cache */
269
270 /* Write the TTB */
271 mcr p15, 0, r0, c2, c0, 0
272
273 /* If we have updated the TTB we must flush the TLB */
274 mcr p15, 0, r0, c8, c7, 0 /* flush the I+D tlb */
275
276 #if 0
277 /* For good measure we will flush the IDC as well */
278 mcr p15, 0, r0, c7, c7, 0 /* flush I+D cache */
279 #endif
280
281 /* Make sure that pipeline is emptied */
282 mov r0, r0
283 mov r0, r0
284 RET
Cache object: 6a9a3bc7b5f022d635f43de689011c63
|