1 /* $NetBSD: cpufunc_asm_sa1.S,v 1.8 2002/08/17 16:36:32 thorpej Exp $ */
2
3 /*-
4 * Copyright (c) 1997,1998 Mark Brinicombe.
5 * Copyright (c) 1997 Causality Limited
6 * All rights reserved.
7 *
8 * Redistribution and use in source and binary forms, with or without
9 * modification, are permitted provided that the following conditions
10 * are met:
11 * 1. Redistributions of source code must retain the above copyright
12 * notice, this list of conditions and the following disclaimer.
13 * 2. Redistributions in binary form must reproduce the above copyright
14 * notice, this list of conditions and the following disclaimer in the
15 * documentation and/or other materials provided with the distribution.
16 * 3. All advertising materials mentioning features or use of this software
17 * must display the following acknowledgement:
18 * This product includes software developed by Causality Limited.
19 * 4. The name of Causality Limited may not be used to endorse or promote
20 * products derived from this software without specific prior written
21 * permission.
22 *
23 * THIS SOFTWARE IS PROVIDED BY CAUSALITY LIMITED ``AS IS'' AND ANY EXPRESS
24 * OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
25 * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
26 * DISCLAIMED. IN NO EVENT SHALL CAUSALITY LIMITED BE LIABLE FOR ANY DIRECT,
27 * INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
28 * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
29 * SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
30 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
31 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
32 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
33 * SUCH DAMAGE.
34 *
35 * SA-1 assembly functions for CPU / MMU / TLB specific operations
36 *
37 */
38
39 #include <machine/asm.h>
40 __FBSDID("$FreeBSD: releng/6.2/sys/arm/arm/cpufunc_asm_sa1.S 139735 2005-01-05 21:58:49Z imp $");
41
/*
 * Literal pool: address of the C variable 'block_userspace_access'.
 * The cache-clean code below sets bit 0 of that flag around the clean
 * loop (non-CACHE_CLEAN_BLOCK_INTR configuration).
 */
42 .Lblock_userspace_access:
43 .word _C_LABEL(block_userspace_access)
44
45 /*
46 * Functions to set the MMU Translation Table Base register
47 *
48 * We need to clean and flush the cache as it uses virtual
49 * addresses that are about to change.
50 */
51 ENTRY(getttb)
/*
 * NOTE(review): getttb has no RET of its own -- it reads the current
 * TTB into r0 and then falls through into sa1_setttb, re-writing the
 * same value and performing the full cache clean/flush below.  Confirm
 * this fall-through is intentional (a RET here may be missing).
 */
52 mrc p15, 0, r0, c2, c0, 0
53 ENTRY(sa1_setttb)
/*
 * sa1_setttb: install a new MMU Translation Table Base.
 *   r0 = address of the new L1 translation table
 * The whole D-cache is written back first because its contents are
 * indexed by virtual addresses that are about to change.
 * Clobbers r1-r3 (and whatever sa1_cache_cleanID clobbers).
 */
54 #ifdef CACHE_CLEAN_BLOCK_INTR
/* Mask IRQ+FIQ for the duration; original cpsr is kept in r3. */
55 mrs r3, cpsr_all
56 orr r1, r3, #(I32_bit | F32_bit)
57 msr cpsr_all, r1
58 #else
/*
 * Otherwise set bit 0 of block_userspace_access; the old value is kept
 * in r2 and the pointer in r3 (both survive the call below thanks to
 * the stmfd/ldmfd pair).
 */
59 ldr r3, .Lblock_userspace_access
60 ldr r2, [r3]
61 orr r1, r2, #1
62 str r1, [r3]
63 #endif
64 stmfd sp!, {r0-r3, lr}
/* Write back the entire D-cache before the mapping changes under it. */
65 bl _C_LABEL(sa1_cache_cleanID)
66 ldmfd sp!, {r0-r3, lr}
67 mcr p15, 0, r0, c7, c5, 0 /* invalidate I$ and BTB */
68 mcr p15, 0, r0, c7, c10, 4 /* drain write and fill buffer */
69
70 /* Write the TTB */
71 mcr p15, 0, r0, c2, c0, 0
72
73 /* If we have updated the TTB we must flush the TLB */
74 mcr p15, 0, r0, c8, c7, 0 /* invalidate I+D TLB */
75
76 /* The cleanID above means we only need to flush the I cache here */
77 mcr p15, 0, r0, c7, c5, 0 /* invalidate I$ and BTB */
78
79 /* Make sure that pipeline is emptied */
80 mov r0, r0
81 mov r0, r0
82 #ifdef CACHE_CLEAN_BLOCK_INTR
/* Restore the interrupt state saved in r3 above. */
83 msr cpsr_all, r3
84 #else
/* Restore the previous block_userspace_access value. */
85 str r2, [r3]
86 #endif
87 RET
88
89 /*
90 * TLB functions
91 */
/*
 * sa1_tlb_flushID_SE: invalidate a single D-TLB entry and the whole
 * I-TLB.
 *   r0 = virtual address whose D-TLB entry is to be invalidated
 */
92 ENTRY(sa1_tlb_flushID_SE)
93 mcr p15, 0, r0, c8, c6, 1 /* flush D tlb single entry */
94 mcr p15, 0, r0, c8, c5, 0 /* flush I tlb */
95 RET
96
97 /*
98 * Cache functions
99 */
/* sa1_cache_flushID: invalidate the entire I+D cache (no write-back). */
100 ENTRY(sa1_cache_flushID)
101 mcr p15, 0, r0, c7, c7, 0 /* flush I+D cache */
102 RET
103
/* sa1_cache_flushI: invalidate the entire I-cache. */
104 ENTRY(sa1_cache_flushI)
105 mcr p15, 0, r0, c7, c5, 0 /* flush I cache */
106 RET
107
/* sa1_cache_flushD: invalidate the entire D-cache (no write-back). */
108 ENTRY(sa1_cache_flushD)
109 mcr p15, 0, r0, c7, c6, 0 /* flush D cache */
110 RET
111
/*
 * sa1_cache_flushD_SE: invalidate one D-cache line (no write-back).
 *   r0 = virtual address within the line
 */
112 ENTRY(sa1_cache_flushD_SE)
113 mcr p15, 0, r0, c7, c6, 1 /* flush D cache single entry */
114 RET
115
/*
 * sa1_cache_cleanD_E: write back (clean) one D-cache line.
 *   r0 = virtual address within the line
 */
116 ENTRY(sa1_cache_cleanD_E)
117 mcr p15, 0, r0, c7, c10, 1 /* clean D cache entry */
118 RET
119
120 /*
121 * Information for the SA-1 cache clean/purge functions:
122 *
123 * * Virtual address of the memory region to use
124 * * Size of memory region
125 */
126 .data
127
/*
 * Virtual address of the memory region the clean loop reads through to
 * force dirty lines out of the D-cache.  Kept in .data so the kernel
 * can patch it at run time.
 */
128 .global _C_LABEL(sa1_cache_clean_addr)
129 _C_LABEL(sa1_cache_clean_addr):
130 .word 0xf0000000
131
/*
 * Size of the clean region.  Appears to be twice the D-cache size so
 * DOUBLE_CACHE_CLEAN_BANK can alternate between two banks -- TODO
 * confirm against the SA-1100/SA-1110 vs SA-110 cache geometry.
 */
132 .global _C_LABEL(sa1_cache_clean_size)
133 _C_LABEL(sa1_cache_clean_size):
134 #if defined(CPU_SA1100) || defined(CPU_SA1110)
135 .word 0x00004000
136 #else
137 .word 0x00008000
138 #endif
139
140 .text
141
/*
 * Literal pool: pointers to the two .data words above.  The prologue
 * loads both words with a single ldmia through the first pointer, so
 * they must stay adjacent in .data.
 */
142 .Lsa1_cache_clean_addr:
143 .word _C_LABEL(sa1_cache_clean_addr)
144 .Lsa1_cache_clean_size:
145 .word _C_LABEL(sa1_cache_clean_size)
146
/*
 * Two ways of keeping the clean loop undisturbed:
 *  - CACHE_CLEAN_BLOCK_INTR: mask IRQ+FIQ, original cpsr saved in r3;
 *  - otherwise: set bit 0 of block_userspace_access, old value saved
 *    in ip, pointer kept in r3.
 * BLOCK and UNBLOCK must be paired within one function, with r3 (and
 * ip) preserved in between.
 */
147 #ifdef CACHE_CLEAN_BLOCK_INTR
148 #define SA1_CACHE_CLEAN_BLOCK \
149 mrs r3, cpsr_all ; \
150 orr r0, r3, #(I32_bit | F32_bit) ; \
151 msr cpsr_all, r0
152
153 #define SA1_CACHE_CLEAN_UNBLOCK \
154 msr cpsr_all, r3
155 #else
156 #define SA1_CACHE_CLEAN_BLOCK \
157 ldr r3, .Lblock_userspace_access ; \
158 ldr ip, [r3] ; \
159 orr r0, ip, #1 ; \
160 str r0, [r3]
161
162 #define SA1_CACHE_CLEAN_UNBLOCK \
163 str ip, [r3]
164 #endif /* CACHE_CLEAN_BLOCK_INTR */
165
/*
 * With DOUBLE_CACHE_CLEAN_BANK the clean region is treated as two
 * banks: XOR-ing the region address (r0) with the size (r1) and
 * storing it back flips to the other bank on each clean, avoiding
 * re-cleaning lines already resident from the previous pass.
 */
166 #ifdef DOUBLE_CACHE_CLEAN_BANK
167 #define SA1_DOUBLE_CACHE_CLEAN_BANK \
168 eor r0, r0, r1 ; \
169 str r0, [r2]
170 #else
171 #define SA1_DOUBLE_CACHE_CLEAN_BANK /* nothing */
172 #endif /* DOUBLE_CACHE_CLEAN_BANK */
173
/*
 * Prologue: block interruption, then load r0 = clean-region address
 * and r1 = clean-region size (r2 = &sa1_cache_clean_addr, reused by
 * the double-bank store above).
 */
174 #define SA1_CACHE_CLEAN_PROLOGUE \
175 SA1_CACHE_CLEAN_BLOCK ; \
176 ldr r2, .Lsa1_cache_clean_addr ; \
177 ldmia r2, {r0, r1} ; \
178 SA1_DOUBLE_CACHE_CLEAN_BANK
179
/* Epilogue: undo whatever SA1_CACHE_CLEAN_BLOCK did. */
180 #define SA1_CACHE_CLEAN_EPILOGUE \
181 SA1_CACHE_CLEAN_UNBLOCK
182
/*
 * sa1_cache_syncI / sa1_cache_purgeID: flush the I-cache, then fall
 * through to clean the D-cache.
 * sa1_cache_cleanID / sa1_cache_purgeD / sa1_cache_cleanD: write back
 * the whole D-cache by reading one word from each 32-byte line of the
 * dedicated clean region, which evicts (and thus writes back) every
 * dirty line.
 * Clobbers r0-r2 and r3 (plus ip in the non-INTR configuration).
 */
183 ENTRY_NP(sa1_cache_syncI)
184 ENTRY_NP(sa1_cache_purgeID)
185 mcr p15, 0, r0, c7, c5, 0 /* flush I cache (D cleaned below) */
186 ENTRY_NP(sa1_cache_cleanID)
187 ENTRY_NP(sa1_cache_purgeD)
188 ENTRY(sa1_cache_cleanD)
189 SA1_CACHE_CLEAN_PROLOGUE
190
/* r0 = clean-region cursor, r1 = bytes remaining (multiple of 32). */
191 1: ldr r2, [r0], #32
192 subs r1, r1, #32
193 bne 1b
194
195 mcr p15, 0, r0, c7, c10, 4 /* drain write buffer */
196
197 SA1_CACHE_CLEAN_EPILOGUE
198 RET
199
/*
 * sa1_cache_purgeID_E: clean+invalidate one D-cache line and flush the
 * entire I-cache.
 *   r0 = virtual address within the D-cache line
 */
200 ENTRY(sa1_cache_purgeID_E)
201 mcr p15, 0, r0, c7, c10, 1 /* clean dcache entry */
202 mcr p15, 0, r0, c7, c10, 4 /* drain write buffer */
203 mcr p15, 0, r0, c7, c5, 0 /* flush I cache */
204 mcr p15, 0, r0, c7, c6, 1 /* flush D cache single entry */
205 RET
206
/*
 * sa1_cache_purgeD_E: clean+invalidate one D-cache line.
 *   r0 = virtual address within the line
 */
207 ENTRY(sa1_cache_purgeD_E)
208 mcr p15, 0, r0, c7, c10, 1 /* clean dcache entry */
209 mcr p15, 0, r0, c7, c10, 4 /* drain write buffer */
210 mcr p15, 0, r0, c7, c6, 1 /* flush D cache single entry */
211 RET
212
213 /*
214 * Soft functions
215 */
216 /* sa1_cache_syncI is identical to sa1_cache_purgeID */
217
/*
 * sa1_cache_cleanID_rng / sa1_cache_cleanD_rng: write back a virtual
 * address range from the D-cache.
 *   r0 = start virtual address, r1 = length in bytes
 * Ranges of 0x4000 bytes or more fall back to a whole-cache clean,
 * which is cheaper than iterating that many lines.
 */
218 ENTRY(sa1_cache_cleanID_rng)
219 ENTRY(sa1_cache_cleanD_rng)
220 cmp r1, #0x4000
221 bcs _C_LABEL(sa1_cache_cleanID) /* tail call: whole-cache clean */
222
/* Round the start down to a 32-byte line; lengthen to compensate. */
223 and r2, r0, #0x1f
224 add r1, r1, r2
225 bic r0, r0, #0x1f
226
227 1: mcr p15, 0, r0, c7, c10, 1 /* clean D cache entry */
228 add r0, r0, #32
229 subs r1, r1, #32
230 bhi 1b
231
232 mcr p15, 0, r0, c7, c10, 4 /* drain write buffer */
233 RET
234
/*
 * sa1_cache_purgeID_rng: clean+invalidate a virtual address range in
 * the D-cache, then flush the whole I-cache.
 *   r0 = start virtual address, r1 = length in bytes
 * Large ranges (>= 0x4000) fall back to the whole-cache purge.
 */
235 ENTRY(sa1_cache_purgeID_rng)
236 cmp r1, #0x4000
237 bcs _C_LABEL(sa1_cache_purgeID) /* tail call: whole-cache purge */
238
/* Round the start down to a 32-byte line; lengthen to compensate. */
239 and r2, r0, #0x1f
240 add r1, r1, r2
241 bic r0, r0, #0x1f
242
243 1: mcr p15, 0, r0, c7, c10, 1 /* clean D cache entry */
244 mcr p15, 0, r0, c7, c6, 1 /* flush D cache single entry */
245 add r0, r0, #32
246 subs r1, r1, #32
247 bhi 1b
248
249 mcr p15, 0, r0, c7, c10, 4 /* drain write buffer */
250 mcr p15, 0, r0, c7, c5, 0 /* flush I cache */
251 RET
252
/*
 * sa1_cache_purgeD_rng: clean+invalidate a virtual address range in
 * the D-cache.
 *   r0 = start virtual address, r1 = length in bytes
 * Large ranges (>= 0x4000) fall back to the whole-cache purge.
 */
253 ENTRY(sa1_cache_purgeD_rng)
254 cmp r1, #0x4000
255 bcs _C_LABEL(sa1_cache_purgeD) /* tail call: whole-cache purge */
256
/* Round the start down to a 32-byte line; lengthen to compensate. */
257 and r2, r0, #0x1f
258 add r1, r1, r2
259 bic r0, r0, #0x1f
260
261 1: mcr p15, 0, r0, c7, c10, 1 /* clean D cache entry */
262 mcr p15, 0, r0, c7, c6, 1 /* flush D cache single entry */
263 add r0, r0, #32
264 subs r1, r1, #32
265 bhi 1b
266
267 mcr p15, 0, r0, c7, c10, 4 /* drain write buffer */
268 RET
269
/*
 * sa1_cache_syncI_rng: make a range of freshly written instructions
 * visible to the I-cache -- clean the D-cache over the range, drain
 * the write buffer, then flush the whole I-cache.
 *   r0 = start virtual address, r1 = length in bytes
 * Large ranges (>= 0x4000) fall back to sa1_cache_syncI.
 */
270 ENTRY(sa1_cache_syncI_rng)
271 cmp r1, #0x4000
272 bcs _C_LABEL(sa1_cache_syncI) /* tail call: whole-cache sync */
273
/* Round the start down to a 32-byte line; lengthen to compensate. */
274 and r2, r0, #0x1f
275 add r1, r1, r2
276 bic r0, r0, #0x1f
277
278 1: mcr p15, 0, r0, c7, c10, 1 /* clean D cache entry */
279 add r0, r0, #32
280 subs r1, r1, #32
281 bhi 1b
282
283 mcr p15, 0, r0, c7, c10, 4 /* drain write buffer */
284 mcr p15, 0, r0, c7, c5, 0 /* flush I cache */
285
286 RET
287
288 /*
289 * Context switch.
290 *
291 * These is the CPU-specific parts of the context switcher cpu_switch()
292 * These functions actually perform the TTB reload.
293 *
294 * NOTE: Special calling convention
295 * r1, r4-r13 must be preserved
296 */
/*
 * sa110_context_switch: install the new process's translation table.
 *   r0 = address of the new L1 translation table
 * Special calling convention (see header above): r1 and r4-r13 must
 * be preserved -- only r0 is used here.
 */
297 #if defined(CPU_SA110)
298 ENTRY(sa110_context_switch)
299 /*
300 * CF_CACHE_PURGE_ID will *ALWAYS* be called prior to this.
301 * Thus the data cache will contain only kernel data and the
302 * instruction cache will contain only kernel code, and all
303 * kernel mappings are shared by all processes.
304 */
305
306 /* Write the TTB */
307 mcr p15, 0, r0, c2, c0, 0
308
309 /* If we have updated the TTB we must flush the TLB */
310 mcr p15, 0, r0, c8, c7, 0 /* flush the I+D tlb */
311
312 /* Make sure that pipeline is emptied */
313 mov r0, r0
314 mov r0, r0
315 RET
316 #endif
/* (web-cache extraction residue, not part of the original source:
   Cache object: e2354b501bab0fdb10c1cb4ae6d279e5 | ) */