/* $NetBSD: cpufunc_asm_xscale.S,v 1.16 2002/08/17 16:36:32 thorpej Exp $ */

/*-
 * Copyright (c) 2007 Olivier Houchard
 * Copyright (c) 2001, 2002 Wasabi Systems, Inc.
 * All rights reserved.
 *
 * Written by Allen Briggs and Jason R. Thorpe for Wasabi Systems, Inc.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. All advertising materials mentioning features or use of this software
 *    must display the following acknowledgement:
 *	This product includes software developed for the NetBSD Project by
 *	Wasabi Systems, Inc.
 * 4. The name of Wasabi Systems, Inc. may not be used to endorse
 *    or promote products derived from this software without specific prior
 *    written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY WASABI SYSTEMS, INC. ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
 * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
 * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL WASABI SYSTEMS, INC
 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 * POSSIBILITY OF SUCH DAMAGE.
 *
 */

/*-
 * Copyright (c) 2001 Matt Thomas.
 * Copyright (c) 1997,1998 Mark Brinicombe.
 * Copyright (c) 1997 Causality Limited
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. All advertising materials mentioning features or use of this software
 *    must display the following acknowledgement:
 *	This product includes software developed by Causality Limited.
 * 4. The name of Causality Limited may not be used to endorse or promote
 *    products derived from this software without specific prior written
 *    permission.
 *
 * THIS SOFTWARE IS PROVIDED BY CAUSALITY LIMITED ``AS IS'' AND ANY EXPRESS
 * OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
 * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
 * DISCLAIMED.  IN NO EVENT SHALL CAUSALITY LIMITED BE LIABLE FOR ANY DIRECT,
 * INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
 * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
 * SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 * XScale core 3 assembly functions for CPU / MMU / TLB specific operations
 */

#include <machine/asm.h>
__FBSDID("$FreeBSD$");
/*
 * Size of the XScale core D-cache.
 */
#define	DCACHE_SIZE		0x00008000

/*
 * Literal pool entry: address of the kernel's block_userspace_access
 * flag.  The non-interrupt-blocking cache-clean macros below set this
 * flag non-zero for the duration of a full cache clean and restore the
 * previous value afterwards.
 */
.Lblock_userspace_access:
	.word	_C_LABEL(block_userspace_access)
86
/*
 * CPWAIT -- Canonical method to wait for CP15 update.
 * From: Intel 80200 manual, section 2.3.3.
 *
 * The sequence is: read any CP15 register, consume the result, then
 * branch (here: a branch-to-next-instruction spelled as "sub pc, pc, #4")
 * to flush the pipeline so the CP15 side effect is visible.
 *
 * NOTE: Clobbers the specified temp reg.
 */
#define	CPWAIT_BRANCH							\
	sub	pc, pc, #4

#define	CPWAIT(tmp)							\
	mrc	p15, 0, tmp, c2, c0, 0	/* arbitrary read of CP15 */	;\
	mov	tmp, tmp		/* wait for it to complete */	;\
	CPWAIT_BRANCH			/* branch to next insn */

/*
 * "lsr #32" yields 0, so "sub pc, lr, tmp, lsr #32" is a return to lr
 * that also consumes the mrc result (combining CPWAIT with the return).
 */
#define	CPWAIT_AND_RETURN_SHIFTER	lsr #32

#define	CPWAIT_AND_RETURN(tmp)						\
	mrc	p15, 0, tmp, c2, c0, 0	/* arbitrary read of CP15 */	;\
	/* Wait for it to complete and branch to the return address */	\
	sub	pc, lr, tmp, CPWAIT_AND_RETURN_SHIFTER
107
/* Use the L2 cache (page tables are marked L2-cacheable in setttb). */
#define ARM_USE_L2_CACHE

/* L2 cache geometry: 512 KB, 8 ways, 32-byte lines -> 2048 sets. */
#define L2_CACHE_SIZE		0x80000
#define L2_CACHE_WAYS		8
#define L2_CACHE_LINE_SIZE	32
#define L2_CACHE_SETS		(L2_CACHE_SIZE / \
				    (L2_CACHE_WAYS * L2_CACHE_LINE_SIZE))

/* L1 D-cache geometry: 32 KB, 4 ways, 32-byte lines -> 256 sets. */
#define L1_DCACHE_SIZE		32 * 1024
#define L1_DCACHE_WAYS		4
#define L1_DCACHE_LINE_SIZE	32
#define L1_DCACHE_SETS		(L1_DCACHE_SIZE / \
				    (L1_DCACHE_WAYS * L1_DCACHE_LINE_SIZE))
#ifdef CACHE_CLEAN_BLOCK_INTR
/*
 * Guard a full cache clean by disabling IRQs and FIQs.  The previous
 * CPSR is kept in r4 (callee-saved, stacked here) so UNBLOCK can
 * restore it; r4 survives the set/way clean loop, which uses r0-r3.
 */
#define	XSCALE_CACHE_CLEAN_BLOCK					\
	stmfd	sp!, {r4}					;	\
	mrs	r4, cpsr_all					;	\
	orr	r0, r4, #(I32_bit | F32_bit)			;	\
	msr	cpsr_all, r0

#define	XSCALE_CACHE_CLEAN_UNBLOCK					\
	msr	cpsr_all, r4					;	\
	ldmfd	sp!, {r4}
#else
/*
 * Guard a full cache clean by setting the block_userspace_access flag
 * instead of masking interrupts.  r4 (callee-saved, stacked here)
 * holds the flag's address across the clean loop; ip holds the flag's
 * previous value so UNBLOCK can restore it.
 */
#define	XSCALE_CACHE_CLEAN_BLOCK					\
	stmfd	sp!, {r4}					;	\
	ldr	r4, .Lblock_userspace_access			;	\
	ldr	ip, [r4]					;	\
	orr	r0, ip, #1					;	\
	str	r0, [r4]

/*
 * Bug fix: this previously stored through r3 ("str ip, [r3]"), but the
 * flag's address is in r4 -- r3 is clobbered by the set/way loop that
 * runs between BLOCK and UNBLOCK, so the old flag value was written to
 * a junk address and block_userspace_access was never cleared.
 */
#define	XSCALE_CACHE_CLEAN_UNBLOCK					\
	str	ip, [r4]					;	\
	ldmfd	sp!, {r4}
#endif /* CACHE_CLEAN_BLOCK_INTR */
143
144
/*
 * xscalec3_cache_syncI / cache_purgeID / cache_cleanID / cache_purgeD /
 * cache_cleanD:
 *
 * Writeback-and-invalidate the entire L1 D-cache by set/way.  All five
 * entry points share the loop below; syncI and purgeID additionally
 * invalidate the whole I-cache first.  The set/way operand is built
 * with the way index in the top two bits (asl #30, 4 ways) and the set
 * index at bit 5 (32-byte lines).
 *
 * Clobbers: r0-r3, ip (r4 is saved/restored by the BLOCK macro).
 */
ENTRY_NP(xscalec3_cache_syncI)
ENTRY_NP(xscalec3_cache_purgeID)
	mcr	p15, 0, r0, c7, c5, 0	/* flush I cache (D cleaned below) */
ENTRY_NP(xscalec3_cache_cleanID)
ENTRY_NP(xscalec3_cache_purgeD)
ENTRY(xscalec3_cache_cleanD)

	XSCALE_CACHE_CLEAN_BLOCK
	mov	r0, #0			/* r0 = way index */
1:
	mov	r1, r0, asl #30		/* way -> bits [31:30] */
	mov	r2, #0			/* r2 = set index */
2:
	orr	r3, r1, r2, asl #5	/* merge set (<< line shift) */
	mcr	p15, 0, r3, c7, c14, 2	/* clean and invalidate */
	add	r2, r2, #1
	cmp	r2, #L1_DCACHE_SETS
	bne	2b
	add	r0, r0, #1
	cmp	r0, #L1_DCACHE_WAYS	/* was a magic "#4" */
	bne	1b
	CPWAIT(r0)
	XSCALE_CACHE_CLEAN_UNBLOCK
	mcr	p15, 0, r0, c7, c10, 4	/* drain write buffer */

	RET
171
/*
 * xscalec3_cache_purgeID_rng:
 *
 * Clean+invalidate D-cache lines and invalidate I-cache lines covering
 * the virtual address range [r0, r0 + r1).
 *   In: r0 = start virtual address, r1 = length in bytes
 * Ranges >= 0x4000 bytes are handed to the full clean/invalidate
 * (cheaper than walking line by line).
 */
ENTRY(xscalec3_cache_purgeID_rng)

	cmp	r1, #0x4000
	bcs	_C_LABEL(xscalec3_cache_cleanID)
	and	r2, r0, #0x1f		/* r2 = offset within 32-byte line */
	add	r1, r1, r2		/* grow length to cover that offset */
	bic	r0, r0, #0x1f		/* align start down to line */

1:	mcr	p15, 0, r0, c7, c14, 1	/* clean/invalidate L1 D cache entry */
	nop
	mcr	p15, 0, r0, c7, c5, 1	/* flush I cache single entry */
	add	r0, r0, #32
	subs	r1, r1, #32
	bhi	1b

	CPWAIT(r0)

	mcr	p15, 0, r0, c7, c10, 4	/* drain write buffer */

	CPWAIT_AND_RETURN(r0)
192
/*
 * xscalec3_cache_syncI_rng:
 *
 * Make instruction fetches coherent with recent data writes over the
 * virtual address range [r0, r0 + r1): clean each D-cache line (no
 * invalidate) and invalidate the corresponding I-cache line.
 *   In: r0 = start virtual address, r1 = length in bytes
 * Ranges >= 0x4000 bytes fall back to the full-cache sync.
 */
ENTRY(xscalec3_cache_syncI_rng)
	cmp	r1, #0x4000
	bcs	_C_LABEL(xscalec3_cache_syncI)

	and	r2, r0, #0x1f		/* r2 = offset within 32-byte line */
	add	r1, r1, r2		/* grow length to cover that offset */
	bic	r0, r0, #0x1f		/* align start down to line */

1:	mcr	p15, 0, r0, c7, c10, 1	/* clean D cache entry */
	mcr	p15, 0, r0, c7, c5, 1	/* flush I cache single entry */
	add	r0, r0, #32
	subs	r1, r1, #32
	bhi	1b

	CPWAIT(r0)

	mcr	p15, 0, r0, c7, c10, 4	/* drain write buffer */

	CPWAIT_AND_RETURN(r0)
212
/*
 * xscalec3_cache_purgeD_rng:
 *
 * Clean+invalidate D-cache lines covering the virtual address range
 * [r0, r0 + r1).  Does not touch the I-cache.
 *   In: r0 = start virtual address, r1 = length in bytes
 * Ranges >= 0x4000 bytes fall back to xscalec3_cache_cleanID, whose
 * set/way loop both cleans and invalidates, so the purge semantics hold.
 */
ENTRY(xscalec3_cache_purgeD_rng)

	cmp	r1, #0x4000
	bcs	_C_LABEL(xscalec3_cache_cleanID)
	and	r2, r0, #0x1f		/* r2 = offset within 32-byte line */
	add	r1, r1, r2		/* grow length to cover that offset */
	bic	r0, r0, #0x1f		/* align start down to line */

1:	mcr	p15, 0, r0, c7, c14, 1	/* Clean and invalidate D cache entry */
	add	r0, r0, #32
	subs	r1, r1, #32
	bhi	1b

	CPWAIT(r0)

	mcr	p15, 0, r0, c7, c10, 4	/* drain write buffer */

	CPWAIT_AND_RETURN(r0)
/*
 * xscalec3_cache_cleanID_rng / xscalec3_cache_cleanD_rng:
 *
 * Writeback (clean only, no invalidate) D-cache lines covering the
 * virtual address range [r0, r0 + r1).
 *   In: r0 = start virtual address, r1 = length in bytes
 * Ranges >= 0x4000 bytes fall back to the full clean.
 */
ENTRY(xscalec3_cache_cleanID_rng)
ENTRY(xscalec3_cache_cleanD_rng)

	cmp	r1, #0x4000
	bcs	_C_LABEL(xscalec3_cache_cleanID)
	and	r2, r0, #0x1f		/* r2 = offset within 32-byte line */
	add	r1, r1, r2		/* grow length to cover that offset */
	bic	r0, r0, #0x1f		/* align start down to line */

1:	mcr	p15, 0, r0, c7, c10, 1	/* clean L1 D cache entry */
	nop
	add	r0, r0, #32
	subs	r1, r1, #32
	bhi	1b

	CPWAIT(r0)

	mcr	p15, 0, r0, c7, c10, 4	/* drain write buffer */

	CPWAIT_AND_RETURN(r0)
251
252
/*
 * xscalec3_l2cache_purge:
 *
 * Clean and invalidate the entire L2 cache by set/way: walk all
 * L2_CACHE_WAYS ways (way index in bits [31:29], asl #29) and all
 * L2_CACHE_SETS sets (set index at bit 5, 32-byte lines), issuing the
 * L2 clean/invalidate-line operation (p15, op1=1, c7, c15, 2).
 *
 * Clobbers: r0-r3.
 */
ENTRY(xscalec3_l2cache_purge)
	/* Clean-up the L2 cache */
	mcr	p15, 0, r0, c7, c10, 5	/* Data memory barrier */
	mov	r0, #0			/* r0 = way index */
1:
	mov	r1, r0, asl #29		/* way -> bits [31:29] */
	mov	r2, #0			/* r2 = set index */
2:
	orr	r3, r1, r2, asl #5	/* merge set (<< line shift) */
	mcr	p15, 1, r3, c7, c15, 2	/* clean and invalidate L2 line */
	add	r2, r2, #1
	cmp	r2, #L2_CACHE_SETS
	bne	2b
	add	r0, r0, #1
	cmp	r0, #L2_CACHE_WAYS	/* was a magic "#8" */
	bne	1b
	mcr	p15, 0, r0, c7, c10, 4	@ data write barrier

	CPWAIT(r0)
	mcr	p15, 0, r0, c7, c10, 5	/* Data memory barrier */
	RET
274
/*
 * xscalec3_l2cache_clean_rng:
 *
 * Writeback (clean only) L2 cache lines covering the virtual address
 * range [r0, r0 + r1).
 *   In: r0 = start virtual address, r1 = length in bytes
 * Unlike the L1 range routines there is no large-range fallback here.
 */
ENTRY(xscalec3_l2cache_clean_rng)
	mcr	p15, 0, r0, c7, c10, 5	/* Data memory barrier */

	and	r2, r0, #0x1f		/* r2 = offset within 32-byte line */
	add	r1, r1, r2		/* grow length to cover that offset */
	bic	r0, r0, #0x1f		/* align start down to line */

1:	mcr	p15, 1, r0, c7, c11, 1	/* Clean L2 D cache entry */
	add	r0, r0, #32
	subs	r1, r1, #32
	bhi	1b


	CPWAIT(r0)

	mcr	p15, 0, r0, c7, c10, 4	@ data write barrier
	mcr	p15, 0, r0, c7, c10, 5	/* data memory barrier */

	CPWAIT_AND_RETURN(r0)
294
/*
 * xscalec3_l2cache_purge_rng:
 *
 * Clean then invalidate L2 cache lines covering the virtual address
 * range [r0, r0 + r1).
 *   In: r0 = start virtual address, r1 = length in bytes
 * No large-range fallback; each line gets an explicit clean
 * (c7, c11, 1) followed by an invalidate (c7, c7, 1).
 */
ENTRY(xscalec3_l2cache_purge_rng)

	mcr	p15, 0, r0, c7, c10, 5	/* Data memory barrier */

	and	r2, r0, #0x1f		/* r2 = offset within 32-byte line */
	add	r1, r1, r2		/* grow length to cover that offset */
	bic	r0, r0, #0x1f		/* align start down to line */

1:	mcr	p15, 1, r0, c7, c11, 1	/* Clean L2 D cache entry */
	mcr	p15, 1, r0, c7, c7, 1	/* Invalidate L2 D cache entry */
	add	r0, r0, #32
	subs	r1, r1, #32
	bhi	1b

	mcr	p15, 0, r0, c7, c10, 4	@ data write barrier
	mcr	p15, 0, r0, c7, c10, 5	/* data memory barrier */

	CPWAIT_AND_RETURN(r0)
313
/*
 * xscalec3_l2cache_flush_rng:
 *
 * Invalidate (WITHOUT cleaning -- dirty data is discarded) L2 cache
 * lines covering the virtual address range [r0, r0 + r1).
 *   In: r0 = start virtual address, r1 = length in bytes
 * Callers must ensure no dirty data in the range needs writing back.
 */
ENTRY(xscalec3_l2cache_flush_rng)
	mcr	p15, 0, r0, c7, c10, 5	/* Data memory barrier */

	and	r2, r0, #0x1f		/* r2 = offset within 32-byte line */
	add	r1, r1, r2		/* grow length to cover that offset */
	bic	r0, r0, #0x1f		/* align start down to line */

1:	mcr	p15, 1, r0, c7, c7, 1	/* Invalidate L2 cache line */
	add	r0, r0, #32
	subs	r1, r1, #32
	bhi	1b
	mcr	p15, 0, r0, c7, c10, 4	@ data write barrier
	mcr	p15, 0, r0, c7, c10, 5	/* data memory barrier */
	CPWAIT_AND_RETURN(r0)
/*
 * Functions to set the MMU Translation Table Base register
 *
 * We need to clean and flush the cache as it uses virtual
 * addresses that are about to change.
 */
/*
 * xscalec3_setttb:
 *   In: r0 = new translation table base
 * Blocks userspace access (or masks IRQ/FIQ when built with
 * CACHE_CLEAN_BLOCK_INTR), fully cleans/invalidates the caches, writes
 * the TTB, invalidates the TLBs, then restores the blocking state.
 */
ENTRY(xscalec3_setttb)
#ifdef CACHE_CLEAN_BLOCK_INTR
	mrs	r3, cpsr_all			/* r3 = saved CPSR */
	orr	r1, r3, #(I32_bit | F32_bit)	/* mask IRQ and FIQ */
	msr	cpsr_all, r1
#else
	ldr	r3, .Lblock_userspace_access	/* r3 = &flag */
	ldr	r2, [r3]			/* r2 = previous flag value */
	orr	r1, r2, #1			/* set the block bit */
	str	r1, [r3]
#endif
	stmfd	sp!, {r0-r3, lr}		/* r2/r3 must survive the bl */
	bl	_C_LABEL(xscalec3_cache_cleanID)
	mcr	p15, 0, r0, c7, c5, 0		/* invalidate I$ and BTB */
	mcr	p15, 0, r0, c7, c10, 4		/* drain write and fill buffer */

	CPWAIT(r0)

	ldmfd	sp!, {r0-r3, lr}

#ifdef ARM_USE_L2_CACHE
	orr	r0, r0, #0x18			/* cache the page table in L2 */
#endif
	/* Write the TTB */
	mcr	p15, 0, r0, c2, c0, 0

	/* If we have updated the TTB we must flush the TLB */
	mcr	p15, 0, r0, c8, c7, 0		/* invalidate I+D TLB */

	CPWAIT(r0)

#ifdef CACHE_CLEAN_BLOCK_INTR
	msr	cpsr_all, r3			/* restore saved CPSR */
#else
	str	r2, [r3]			/* restore previous flag value */
#endif
	RET
371
/*
 * Context switch.
 *
 * These is the CPU-specific parts of the context switcher cpu_switch()
 * These functions actually perform the TTB reload.
 *
 * NOTE: Special calling convention
 * r1, r4-r13 must be preserved
 */
/*
 * xscalec3_context_switch:
 *   In: r0 = new process's translation table base
 * Writes the TTB and invalidates the TLBs; no cache clean is done here
 * (see the comment below on CF_CACHE_PURGE_ID).  Only r0 and the temp
 * used by CPWAIT_AND_RETURN are touched, honoring the convention above.
 */
ENTRY(xscalec3_context_switch)
	/*
	 * CF_CACHE_PURGE_ID will *ALWAYS* be called prior to this.
	 * Thus the data cache will contain only kernel data and the
	 * instruction cache will contain only kernel code, and all
	 * kernel mappings are shared by all processes.
	 */
#ifdef ARM_USE_L2_CACHE
	orr	r0, r0, #0x18		/* Cache the page table in L2 */
#endif
	/* Write the TTB */
	mcr	p15, 0, r0, c2, c0, 0

	/* If we have updated the TTB we must flush the TLB */
	mcr	p15, 0, r0, c8, c7, 0	/* flush the I+D tlb */

	CPWAIT_AND_RETURN(r0)