1 /* $NetBSD: cpufunc_asm_sa1.S,v 1.8 2002/08/17 16:36:32 thorpej Exp $ */
2
3 /*-
4 * Copyright (c) 1997,1998 Mark Brinicombe.
5 * Copyright (c) 1997 Causality Limited
6 * All rights reserved.
7 *
8 * Redistribution and use in source and binary forms, with or without
9 * modification, are permitted provided that the following conditions
10 * are met:
11 * 1. Redistributions of source code must retain the above copyright
12 * notice, this list of conditions and the following disclaimer.
13 * 2. Redistributions in binary form must reproduce the above copyright
14 * notice, this list of conditions and the following disclaimer in the
15 * documentation and/or other materials provided with the distribution.
16 * 3. All advertising materials mentioning features or use of this software
17 * must display the following acknowledgement:
18 * This product includes software developed by Causality Limited.
19 * 4. The name of Causality Limited may not be used to endorse or promote
20 * products derived from this software without specific prior written
21 * permission.
22 *
23 * THIS SOFTWARE IS PROVIDED BY CAUSALITY LIMITED ``AS IS'' AND ANY EXPRESS
24 * OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
25 * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
26 * DISCLAIMED. IN NO EVENT SHALL CAUSALITY LIMITED BE LIABLE FOR ANY DIRECT,
27 * INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
28 * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
29 * SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
30 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
31 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
32 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
33 * SUCH DAMAGE.
34 *
35 * SA-1 assembly functions for CPU / MMU / TLB specific operations
36 *
37 */
38
39 #include <machine/asm.h>
40 __FBSDID("$FreeBSD: releng/10.0/sys/arm/arm/cpufunc_asm_sa1.S 248361 2013-03-16 02:48:49Z andrew $");
41
/*
 * Literal-pool pointer to the C variable block_userspace_access.  The
 * cache-clean code below sets this flag (when CACHE_CLEAN_BLOCK_INTR is
 * not defined) so userspace cannot touch the cache-clean region while a
 * clean is in progress.
 */
.Lblock_userspace_access:
	.word	_C_LABEL(block_userspace_access)
44
/*
 * Functions to set the MMU Translation Table Base register
 *
 * We need to clean and flush the cache as it uses virtual
 * addresses that are about to change.
 */
ENTRY(getttb)
	/*
	 * Read the current TTB into r0.
	 *
	 * NOTE(review): there is no RET here, so getttb falls through
	 * into sa1_setttb and rewrites the value it just read (after a
	 * full cache clean).  This matches the upstream NetBSD/FreeBSD
	 * source, but the fall-through is worth confirming as intended.
	 */
	mrc	p15, 0, r0, c2, c0, 0
ENTRY(sa1_setttb)
#ifdef CACHE_CLEAN_BLOCK_INTR
	/* Disable IRQ+FIQ around the clean; r3 holds the saved cpsr. */
	mrs	r3, cpsr_all
	orr	r1, r3, #(I32_bit | F32_bit)
	msr	cpsr_all, r1
#else
	/* Set block_userspace_access; r3 = &flag, r2 = previous value. */
	ldr	r3, .Lblock_userspace_access
	ldr	r2, [r3]
	orr	r1, r2, #1
	str	r1, [r3]
#endif
	/* Preserve the new TTB (r0) and saved state (r2/r3) over the call. */
	stmfd	sp!, {r0-r3, lr}
	bl	_C_LABEL(sa1_cache_cleanID)
	ldmfd	sp!, {r0-r3, lr}
	mcr	p15, 0, r0, c7, c5, 0	/* invalidate I$ and BTB */
	mcr	p15, 0, r0, c7, c10, 4	/* drain write and fill buffer */

	/* Write the TTB */
	mcr	p15, 0, r0, c2, c0, 0

	/* If we have updated the TTB we must flush the TLB */
	mcr	p15, 0, r0, c8, c7, 0	/* invalidate I+D TLB */

	/* The cleanID above means we only need to flush the I cache here */
	mcr	p15, 0, r0, c7, c5, 0	/* invalidate I$ and BTB */

	/* Make sure that pipeline is emptied (two no-op movs) */
	mov	r0, r0
	mov	r0, r0
#ifdef CACHE_CLEAN_BLOCK_INTR
	msr	cpsr_all, r3		/* restore saved interrupt state */
#else
	str	r2, [r3]		/* restore block_userspace_access */
#endif
	RET
END(getttb)
END(sa1_setttb)
90
/*
 * TLB functions
 */
/*
 * void sa1_tlb_flushID_SE(va)
 *
 * In:	r0 = virtual address
 * Invalidates the single D-TLB entry covering r0, then the whole I-TLB
 * (no single-entry I-TLB operation is used here).
 */
ENTRY(sa1_tlb_flushID_SE)
	mcr	p15, 0, r0, c8, c6, 1	/* flush D tlb single entry */
	mcr	p15, 0, r0, c8, c5, 0	/* flush I tlb */
	RET
END(sa1_tlb_flushID_SE)
99
/*
 * Cache functions
 */
/* Invalidate (without cleaning) both the I-cache and the D-cache. */
ENTRY(sa1_cache_flushID)
	mcr	p15, 0, r0, c7, c7, 0	/* flush I+D cache */
	RET
END(sa1_cache_flushID)
107
/* Invalidate the entire I-cache. */
ENTRY(sa1_cache_flushI)
	mcr	p15, 0, r0, c7, c5, 0	/* flush I cache */
	RET
END(sa1_cache_flushI)
112
/* Invalidate the entire D-cache (dirty lines are NOT written back). */
ENTRY(sa1_cache_flushD)
	mcr	p15, 0, r0, c7, c6, 0	/* flush D cache */
	RET
END(sa1_cache_flushD)
117
/* Invalidate the single D-cache line covering the address in r0. */
ENTRY(sa1_cache_flushD_SE)
	mcr	p15, 0, r0, c7, c6, 1	/* flush D cache single entry */
	RET
END(sa1_cache_flushD_SE)
122
/* Clean (write back, leave valid) the D-cache line covering r0. */
ENTRY(sa1_cache_cleanD_E)
	mcr	p15, 0, r0, c7, c10, 1	/* clean D cache entry */
	RET
END(sa1_cache_cleanD_E)
127
/*
 * Information for the SA-1 cache clean/purge functions:
 *
 * * Virtual address of the memory region to use
 * * Size of memory region
 */
	.data

	/* Base VA of the region read to force dirty D-cache lines out. */
	.global _C_LABEL(sa1_cache_clean_addr)
_C_LABEL(sa1_cache_clean_addr):
	.word	0xf0000000

	/* Region size: 16 KB for SA-1100/SA-1110, 32 KB otherwise. */
	.global _C_LABEL(sa1_cache_clean_size)
_C_LABEL(sa1_cache_clean_size):
#if defined(CPU_SA1100) || defined(CPU_SA1110)
	.word	0x00004000
#else
	.word	0x00008000
#endif
147
	.text

/*
 * Literal-pool pointers to the two .data words above.  The two labels
 * are laid out consecutively so the prologue macro can ldmia both
 * values (addr, size) with one instruction.
 */
.Lsa1_cache_clean_addr:
	.word	_C_LABEL(sa1_cache_clean_addr)
.Lsa1_cache_clean_size:
	.word	_C_LABEL(sa1_cache_clean_size)
154
#ifdef CACHE_CLEAN_BLOCK_INTR
/*
 * Block reentry into the cache clean by disabling IRQ+FIQ.  r3 holds
 * the saved cpsr; the UNBLOCK macro restores it.  r3 (and ip, in the
 * non-INTR variant) must therefore stay live between the two macros.
 */
#define SA1_CACHE_CLEAN_BLOCK \
	mrs r3, cpsr_all ; \
	orr r0, r3, #(I32_bit | F32_bit) ; \
	msr cpsr_all, r0

#define SA1_CACHE_CLEAN_UNBLOCK \
	msr cpsr_all, r3
#else
/*
 * Block by setting the block_userspace_access flag instead of masking
 * interrupts.  r3 = &flag, ip = the flag's previous value, which the
 * UNBLOCK macro writes back.
 */
#define SA1_CACHE_CLEAN_BLOCK \
	ldr r3, .Lblock_userspace_access ; \
	ldr ip, [r3] ; \
	orr r0, ip, #1 ; \
	str r0, [r3]

#define SA1_CACHE_CLEAN_UNBLOCK \
	str ip, [r3]
#endif /* CACHE_CLEAN_BLOCK_INTR */

#ifdef DOUBLE_CACHE_CLEAN_BANK
/*
 * Alternate between two clean-region banks: XOR the base address with
 * the size (one power-of-two bit) and store the new base back, so
 * successive cleans use disjoint regions.
 */
#define SA1_DOUBLE_CACHE_CLEAN_BANK \
	eor r0, r0, r1 ; \
	str r0, [r2]
#else
#define SA1_DOUBLE_CACHE_CLEAN_BANK /* nothing */
#endif /* DOUBLE_CACHE_CLEAN_BANK */

/*
 * On exit from the prologue: r0 = clean-region base VA, r1 = region
 * size in bytes (loaded pairwise from the consecutive .data words).
 */
#define SA1_CACHE_CLEAN_PROLOGUE \
	SA1_CACHE_CLEAN_BLOCK ; \
	ldr r2, .Lsa1_cache_clean_addr ; \
	ldmia r2, {r0, r1} ; \
	SA1_DOUBLE_CACHE_CLEAN_BANK

#define SA1_CACHE_CLEAN_EPILOGUE \
	SA1_CACHE_CLEAN_UNBLOCK
190
/*
 * Full-cache clean workhorse, shared by five entry points:
 *   sa1_cache_syncI / sa1_cache_purgeID  - also invalidate the I-cache
 *   sa1_cache_cleanID / sa1_cache_purgeD / sa1_cache_cleanD
 * It walks the dedicated clean region reading one word per 32-byte
 * cache line, which evicts (writes back) every dirty D-cache line,
 * then drains the write buffer.
 * Clobbers: r0-r3 (and ip in the non-INTR BLOCK variant).
 */
ENTRY_NP(sa1_cache_syncI)
ENTRY_NP(sa1_cache_purgeID)
	mcr	p15, 0, r0, c7, c5, 0	/* flush I cache (D cleaned below) */
ENTRY_NP(sa1_cache_cleanID)
ENTRY_NP(sa1_cache_purgeD)
ENTRY(sa1_cache_cleanD)
	SA1_CACHE_CLEAN_PROLOGUE

	/* r0 = region cursor, r1 = bytes remaining (multiple of 32). */
1:	ldr	r2, [r0], #32
	subs	r1, r1, #32
	bne	1b

	mcr	p15, 0, r0, c7, c10, 4	/* drain write buffer */

	SA1_CACHE_CLEAN_EPILOGUE
	RET
END(sa1_cache_syncI)
END(sa1_cache_purgeID)
END(sa1_cache_cleanID)
END(sa1_cache_purgeD)
END(sa1_cache_cleanD)
212
/*
 * Purge (clean then invalidate) the D-cache line covering r0, and
 * invalidate the whole I-cache.  The write buffer is drained between
 * the clean and the invalidates so the write-back reaches memory.
 */
ENTRY(sa1_cache_purgeID_E)
	mcr	p15, 0, r0, c7, c10, 1	/* clean dcache entry */
	mcr	p15, 0, r0, c7, c10, 4	/* drain write buffer */
	mcr	p15, 0, r0, c7, c5, 0	/* flush I cache */
	mcr	p15, 0, r0, c7, c6, 1	/* flush D cache single entry */
	RET
END(sa1_cache_purgeID_E)
220
/*
 * Purge (clean then invalidate) the single D-cache line covering r0;
 * the I-cache is left alone.
 */
ENTRY(sa1_cache_purgeD_E)
	mcr	p15, 0, r0, c7, c10, 1	/* clean dcache entry */
	mcr	p15, 0, r0, c7, c10, 4	/* drain write buffer */
	mcr	p15, 0, r0, c7, c6, 1	/* flush D cache single entry */
	RET
END(sa1_cache_purgeD_E)
227
/*
 * Soft functions
 */
/* sa1_cache_syncI is identical to sa1_cache_purgeID */

/*
 * Clean a virtual address range.
 * In:	r0 = start VA, r1 = length in bytes
 * For ranges of 0x4000 bytes or more it is cheaper to clean the whole
 * cache, so tail-call the full-cache variant.  Otherwise round r0 down
 * to a 32-byte line (extending r1 to compensate) and clean line by line.
 */
ENTRY(sa1_cache_cleanID_rng)
ENTRY(sa1_cache_cleanD_rng)
	cmp	r1, #0x4000
	bcs	_C_LABEL(sa1_cache_cleanID)

	and	r2, r0, #0x1f		/* r2 = offset within cache line */
	add	r1, r1, r2		/* grow length by the rounding */
	bic	r0, r0, #0x1f		/* align start to a line boundary */

1:	mcr	p15, 0, r0, c7, c10, 1	/* clean D cache entry */
	add	r0, r0, #32
	subs	r1, r1, #32
	bhi	1b

	mcr	p15, 0, r0, c7, c10, 4	/* drain write buffer */
	RET
END(sa1_cache_cleanID_rng)
END(sa1_cache_cleanD_rng)
251
/*
 * Purge (clean+invalidate) a D-cache range and invalidate the I-cache.
 * In:	r0 = start VA, r1 = length in bytes
 * Ranges >= 0x4000 bytes fall back to the full-cache purge.
 */
ENTRY(sa1_cache_purgeID_rng)
	cmp	r1, #0x4000
	bcs	_C_LABEL(sa1_cache_purgeID)

	and	r2, r0, #0x1f		/* r2 = offset within cache line */
	add	r1, r1, r2		/* grow length by the rounding */
	bic	r0, r0, #0x1f		/* align start to a line boundary */

1:	mcr	p15, 0, r0, c7, c10, 1	/* clean D cache entry */
	mcr	p15, 0, r0, c7, c6, 1	/* flush D cache single entry */
	add	r0, r0, #32
	subs	r1, r1, #32
	bhi	1b

	mcr	p15, 0, r0, c7, c10, 4	/* drain write buffer */
	mcr	p15, 0, r0, c7, c5, 0	/* flush I cache */
	RET
END(sa1_cache_purgeID_rng)
270
/*
 * Purge (clean+invalidate) a D-cache range; the I-cache is untouched.
 * In:	r0 = start VA, r1 = length in bytes
 * Ranges >= 0x4000 bytes fall back to the full-cache purge.
 */
ENTRY(sa1_cache_purgeD_rng)
	cmp	r1, #0x4000
	bcs	_C_LABEL(sa1_cache_purgeD)

	and	r2, r0, #0x1f		/* r2 = offset within cache line */
	add	r1, r1, r2		/* grow length by the rounding */
	bic	r0, r0, #0x1f		/* align start to a line boundary */

1:	mcr	p15, 0, r0, c7, c10, 1	/* clean D cache entry */
	mcr	p15, 0, r0, c7, c6, 1	/* flush D cache single entry */
	add	r0, r0, #32
	subs	r1, r1, #32
	bhi	1b

	mcr	p15, 0, r0, c7, c10, 4	/* drain write buffer */
	RET
END(sa1_cache_purgeD_rng)
288
/*
 * Synchronize the I-cache with a range just written through the D-cache
 * (e.g. freshly loaded code): clean the D-cache lines, drain the write
 * buffer, then invalidate the whole I-cache.
 * In:	r0 = start VA, r1 = length in bytes
 * Ranges >= 0x4000 bytes fall back to the full-cache sa1_cache_syncI.
 */
ENTRY(sa1_cache_syncI_rng)
	cmp	r1, #0x4000
	bcs	_C_LABEL(sa1_cache_syncI)

	and	r2, r0, #0x1f		/* r2 = offset within cache line */
	add	r1, r1, r2		/* grow length by the rounding */
	bic	r0, r0, #0x1f		/* align start to a line boundary */

1:	mcr	p15, 0, r0, c7, c10, 1	/* clean D cache entry */
	add	r0, r0, #32
	subs	r1, r1, #32
	bhi	1b

	mcr	p15, 0, r0, c7, c10, 4	/* drain write buffer */
	mcr	p15, 0, r0, c7, c5, 0	/* flush I cache */

	RET
END(sa1_cache_syncI_rng)
307
/*
 * Context switch.
 *
 * These is the CPU-specific parts of the context switcher cpu_switch()
 * These functions actually perform the TTB reload.
 *
 * NOTE: Special calling convention
 * r1, r4-r13 must be preserved
 */
#if defined(CPU_SA110)
/*
 * In:	r0 = new translation table base (physical)
 * Only r0 and the coprocessor are touched, honoring the convention
 * above.  No cache clean is done here -- see the comment below.
 */
ENTRY(sa110_context_switch)
	/*
	 * CF_CACHE_PURGE_ID will *ALWAYS* be called prior to this.
	 * Thus the data cache will contain only kernel data and the
	 * instruction cache will contain only kernel code, and all
	 * kernel mappings are shared by all processes.
	 */

	/* Write the TTB */
	mcr	p15, 0, r0, c2, c0, 0

	/* If we have updated the TTB we must flush the TLB */
	mcr	p15, 0, r0, c8, c7, 0	/* flush the I+D tlb */

	/* Make sure that pipeline is emptied (two no-op movs) */
	mov	r0, r0
	mov	r0, r0
	RET
END(sa110_context_switch)
#endif