1 /*-
2 * Copyright (C) 2008 MARVELL INTERNATIONAL LTD.
3 * All rights reserved.
4 *
5 * Developed by Semihalf.
6 *
7 * Redistribution and use in source and binary forms, with or without
8 * modification, are permitted provided that the following conditions
9 * are met:
10 * 1. Redistributions of source code must retain the above copyright
11 * notice, this list of conditions and the following disclaimer.
12 * 2. Redistributions in binary form must reproduce the above copyright
13 * notice, this list of conditions and the following disclaimer in the
14 * documentation and/or other materials provided with the distribution.
15 * 3. Neither the name of MARVELL nor the names of contributors
16 * may be used to endorse or promote products derived from this software
17 * without specific prior written permission.
18 *
19 * THIS SOFTWARE IS PROVIDED BY AUTHOR AND CONTRIBUTORS ``AS IS'' AND
20 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
21 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
22 * ARE DISCLAIMED. IN NO EVENT SHALL AUTHOR OR CONTRIBUTORS BE LIABLE
23 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
24 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
25 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
26 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
27 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
28 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
29 * SUCH DAMAGE.
30 */
31
32 #include <machine/asm.h>
33 __FBSDID("$FreeBSD: releng/10.0/sys/arm/arm/cpufunc_asm_sheeva.S 250695 2013-05-16 09:43:04Z gber $");
34
35 #include <machine/param.h>
36
/*
 * Literal pool shared by the routines below.
 *
 * .Lsheeva_cache_line_size holds the ADDRESS of the kernel variable
 * arm_pdcache_line_size; users load it and then dereference once more
 * (two ldr's) to get the primary D-cache line size in bytes.
 *
 * .Lsheeva_asm_page_mask holds the PAGE_MASK constant VALUE itself
 * (PAGE_SIZE - 1 from <machine/param.h>); users load it with a single ldr.
 */
.Lsheeva_cache_line_size:
	.word	_C_LABEL(arm_pdcache_line_size)
.Lsheeva_asm_page_mask:
	.word	_C_LABEL(PAGE_MASK)
41
/*
 * void sheeva_setttb(u_int ttb)
 *
 * Install a new translation table base.
 * In:      r0 = new TTB value (physical address of the L1 table)
 * Clobbers r1, r2, r3, flags.
 *
 * The whole I-cache is invalidated and the D-cache is cleaned and
 * invalidated (test/clean/invalidate loop), then L2 is cleaned and
 * invalidated via the Sheeva-specific CP15 c15 operations, all with
 * IRQ/FIQ masked.  Finally TTBR is loaded and both TLBs are flushed.
 */
ENTRY(sheeva_setttb)
	/* Disable irqs so nothing dirties the caches while we flush them */
	mrs	r2, cpsr		/* r2 = saved PSR for restore below */
	orr	r3, r2, #I32_bit | F32_bit
	msr	cpsr_c, r3

	mov	r1, #0
	mcr	p15, 0, r1, c7, c5, 0	/* Invalidate ICache */
1:	mrc	p15, 0, r15, c7, c14, 3	/* Test, clean and invalidate DCache */
	bne	1b			/* More to do? (loops until cache clean) */

	mcr	p15, 1, r1, c15, c9, 0	/* Clean L2 */
	mcr	p15, 1, r1, c15, c11, 0	/* Invalidate L2 */

	/* Reenable irqs */
	msr	cpsr_c, r2

	mcr	p15, 0, r1, c7, c10, 4	/* drain the write buffer */

	mcr	p15, 0, r0, c2, c0, 0	/* load new TTB */

	mcr	p15, 0, r0, c8, c7, 0	/* invalidate I+D TLBs (value ignored) */
	RET
END(sheeva_setttb)
66
/*
 * void sheeva_dcache_wbinv_range(vm_offset_t va, vm_size_t len)
 *
 * Write back and invalidate the D-cache over [va, va + len).
 * In:      r0 = start virtual address, r1 = length in bytes
 * Clobbers r2, r3, ip, flags; lr is saved/restored on the stack.
 *
 * The range is widened to whole cache lines, then processed in chunks
 * of at most one page (the first chunk only runs to the next page
 * boundary — presumably so a CP15 range operation never crosses a page;
 * TODO confirm against the Sheeva core documentation).  IRQ/FIQ are
 * masked only around each CP15 range operation.
 */
ENTRY(sheeva_dcache_wbinv_range)
	str	lr, [sp, #-4]!
	mrs	lr, cpsr		/* lr = saved PSR, restored per chunk */
	/* Start with cache line aligned address */
	ldr	ip, .Lsheeva_cache_line_size
	ldr	ip, [ip]		/* ip = D-cache line size */
	sub	ip, ip, #1		/* ip = line mask */
	and	r2, r0, ip		/* r2 = va offset within its line */
	add	r1, r1, r2		/* grow len to cover line start */
	add	r1, r1, ip
	bics	r1, r1, ip		/* round len up to whole lines */
	bics	r0, r0, ip		/* round va down to line start */

	ldr	ip, .Lsheeva_asm_page_mask	/* ip = PAGE_MASK constant */
	and	r2, r0, ip		/* r2 = va offset within its page */
	rsb	r2, r2, #PAGE_SIZE	/* r2 = bytes to next page boundary */
	cmp	r1, r2
	movcc	ip, r1			/* ip = chunk = min(len, r2) */
	movcs	ip, r2
1:
	add	r3, r0, ip
	sub	r2, r3, #1		/* r2 = address of last byte in chunk */
	/* Disable irqs */
	orr	r3, lr, #I32_bit | F32_bit
	msr	cpsr_c, r3
	mcr	p15, 5, r0, c15, c15, 0	/* Clean and inv zone start address */
	mcr	p15, 5, r2, c15, c15, 1	/* Clean and inv zone end address */
	/* Enable irqs */
	msr	cpsr_c, lr

	add	r0, r0, ip		/* advance past this chunk */
	sub	r1, r1, ip		/* len -= chunk */
	cmp	r1, #PAGE_SIZE		/* next chunk = min(len, PAGE_SIZE) */
	movcc	ip, r1
	movcs	ip, #PAGE_SIZE
	cmp	r1, #0
	bne	1b			/* more to do? */
	mov	r0, #0
	mcr	p15, 0, r0, c7, c10, 4	/* drain the write buffer */
	ldr	lr, [sp], #4
	RET
END(sheeva_dcache_wbinv_range)
109
/*
 * void sheeva_idcache_wbinv_range(vm_offset_t va, vm_size_t len)
 *
 * Write back and invalidate the D-cache, and invalidate the I-cache,
 * over [va, va + len).
 * In:      r0 = start virtual address, r1 = length in bytes
 * Clobbers r2, r3, ip, flags; lr is saved/restored on the stack.
 *
 * Same per-page chunking as sheeva_dcache_wbinv_range; within each
 * chunk the D-side uses the CP15 range operation (IRQs masked), after
 * which the I-cache is invalidated one line at a time by MVA.
 */
ENTRY(sheeva_idcache_wbinv_range)
	str	lr, [sp, #-4]!
	mrs	lr, cpsr		/* lr = saved PSR, restored per chunk */
	/* Start with cache line aligned address */
	ldr	ip, .Lsheeva_cache_line_size
	ldr	ip, [ip]		/* ip = D-cache line size */
	sub	ip, ip, #1		/* ip = line mask */
	and	r2, r0, ip		/* r2 = va offset within its line */
	add	r1, r1, r2		/* grow len to cover line start */
	add	r1, r1, ip
	bics	r1, r1, ip		/* round len up to whole lines */
	bics	r0, r0, ip		/* round va down to line start */

	ldr	ip, .Lsheeva_asm_page_mask	/* ip = PAGE_MASK constant */
	and	r2, r0, ip		/* r2 = va offset within its page */
	rsb	r2, r2, #PAGE_SIZE	/* r2 = bytes to next page boundary */
	cmp	r1, r2
	movcc	ip, r1			/* ip = chunk = min(len, r2) */
	movcs	ip, r2
1:
	add	r3, r0, ip
	sub	r2, r3, #1		/* r2 = address of last byte in chunk */
	/* Disable irqs */
	orr	r3, lr, #I32_bit | F32_bit
	msr	cpsr_c, r3
	mcr	p15, 5, r0, c15, c15, 0	/* Clean and inv zone start address */
	mcr	p15, 5, r2, c15, c15, 1	/* Clean and inv zone end address */
	/* Enable irqs */
	msr	cpsr_c, lr

	/* Invalidate and clean icache line by line */
	ldr	r3, .Lsheeva_cache_line_size
	ldr	r3, [r3]		/* r3 = line size (loop stride) */
2:
	mcr	p15, 0, r0, c7, c5, 1	/* invalidate I-cache line by MVA */
	add	r0, r0, r3
	cmp	r2, r0			/* past the chunk's last byte yet? */
	bhi	2b

	add	r0, r2, #1		/* r0 = first byte after this chunk */
	sub	r1, r1, ip		/* len -= chunk */
	cmp	r1, #PAGE_SIZE		/* next chunk = min(len, PAGE_SIZE) */
	movcc	ip, r1
	movcs	ip, #PAGE_SIZE
	cmp	r1, #0
	bne	1b			/* more to do? */
	mov	r0, #0
	mcr	p15, 0, r0, c7, c10, 4	/* drain the write buffer */
	ldr	lr, [sp], #4
	RET
END(sheeva_idcache_wbinv_range)
161
/*
 * void sheeva_dcache_inv_range(vm_offset_t va, vm_size_t len)
 *
 * Invalidate (without write-back) the D-cache over [va, va + len).
 * In:      r0 = start virtual address, r1 = length in bytes
 * Clobbers r2, r3, ip, flags; lr is saved/restored on the stack.
 *
 * NOTE(review): the range is rounded out to whole cache lines, so any
 * line only partially covered at either edge is invalidated without
 * being cleaned — callers must ensure the range is line-aligned or
 * that edge data is disposable.
 * Same per-page chunking as sheeva_dcache_wbinv_range; IRQ/FIQ are
 * masked only around each CP15 range operation.
 */
ENTRY(sheeva_dcache_inv_range)
	str	lr, [sp, #-4]!
	mrs	lr, cpsr		/* lr = saved PSR, restored per chunk */
	/* Start with cache line aligned address */
	ldr	ip, .Lsheeva_cache_line_size
	ldr	ip, [ip]		/* ip = D-cache line size */
	sub	ip, ip, #1		/* ip = line mask */
	and	r2, r0, ip		/* r2 = va offset within its line */
	add	r1, r1, r2		/* grow len to cover line start */
	add	r1, r1, ip
	bics	r1, r1, ip		/* round len up to whole lines */
	bics	r0, r0, ip		/* round va down to line start */

	ldr	ip, .Lsheeva_asm_page_mask	/* ip = PAGE_MASK constant */
	and	r2, r0, ip		/* r2 = va offset within its page */
	rsb	r2, r2, #PAGE_SIZE	/* r2 = bytes to next page boundary */
	cmp	r1, r2
	movcc	ip, r1			/* ip = chunk = min(len, r2) */
	movcs	ip, r2
1:
	add	r3, r0, ip
	sub	r2, r3, #1		/* r2 = address of last byte in chunk */
	/* Disable irqs */
	orr	r3, lr, #I32_bit | F32_bit
	msr	cpsr_c, r3
	mcr	p15, 5, r0, c15, c14, 0	/* Inv zone start address */
	mcr	p15, 5, r2, c15, c14, 1	/* Inv zone end address */
	/* Enable irqs */
	msr	cpsr_c, lr

	add	r0, r0, ip		/* advance past this chunk */
	sub	r1, r1, ip		/* len -= chunk */
	cmp	r1, #PAGE_SIZE		/* next chunk = min(len, PAGE_SIZE) */
	movcc	ip, r1
	movcs	ip, #PAGE_SIZE
	cmp	r1, #0
	bne	1b			/* more to do? */
	mov	r0, #0
	mcr	p15, 0, r0, c7, c10, 4	/* drain the write buffer */
	ldr	lr, [sp], #4
	RET
END(sheeva_dcache_inv_range)
204
/*
 * void sheeva_dcache_wb_range(vm_offset_t va, vm_size_t len)
 *
 * Write back (clean, without invalidating) the D-cache over
 * [va, va + len).
 * In:      r0 = start virtual address, r1 = length in bytes
 * Clobbers r2, r3, ip, flags; lr is saved/restored on the stack.
 *
 * Same per-page chunking as sheeva_dcache_wbinv_range; IRQ/FIQ are
 * masked only around each CP15 range operation.
 */
ENTRY(sheeva_dcache_wb_range)
	str	lr, [sp, #-4]!
	mrs	lr, cpsr		/* lr = saved PSR, restored per chunk */
	/* Start with cache line aligned address */
	ldr	ip, .Lsheeva_cache_line_size
	ldr	ip, [ip]		/* ip = D-cache line size */
	sub	ip, ip, #1		/* ip = line mask */
	and	r2, r0, ip		/* r2 = va offset within its line */
	add	r1, r1, r2		/* grow len to cover line start */
	add	r1, r1, ip
	bics	r1, r1, ip		/* round len up to whole lines */
	bics	r0, r0, ip		/* round va down to line start */

	ldr	ip, .Lsheeva_asm_page_mask	/* ip = PAGE_MASK constant */
	and	r2, r0, ip		/* r2 = va offset within its page */
	rsb	r2, r2, #PAGE_SIZE	/* r2 = bytes to next page boundary */
	cmp	r1, r2
	movcc	ip, r1			/* ip = chunk = min(len, r2) */
	movcs	ip, r2
1:
	add	r3, r0, ip
	sub	r2, r3, #1		/* r2 = address of last byte in chunk */
	/* Disable irqs */
	orr	r3, lr, #I32_bit | F32_bit
	msr	cpsr_c, r3
	mcr	p15, 5, r0, c15, c13, 0	/* Clean zone start address */
	mcr	p15, 5, r2, c15, c13, 1	/* Clean zone end address */
	/* Enable irqs */
	msr	cpsr_c, lr

	add	r0, r0, ip		/* advance past this chunk */
	sub	r1, r1, ip		/* len -= chunk */
	cmp	r1, #PAGE_SIZE		/* next chunk = min(len, PAGE_SIZE) */
	movcc	ip, r1
	movcs	ip, #PAGE_SIZE
	cmp	r1, #0
	bne	1b			/* more to do? */
	mov	r0, #0
	mcr	p15, 0, r0, c7, c10, 4	/* drain the write buffer */
	ldr	lr, [sp], #4
	RET
END(sheeva_dcache_wb_range)
247
/*
 * void sheeva_l2cache_wbinv_range(vm_offset_t va, vm_size_t len)
 *
 * Write back and invalidate the L2 cache over [va, va + len).
 * In:      r0 = start virtual address, r1 = length in bytes
 * Clobbers r2, r3, ip, flags; lr is saved/restored on the stack.
 *
 * Performed as a clean-range followed by an invalidate-range per
 * chunk (there is no single combined L2 clean+inv range op here).
 * Same per-page chunking as the L1 range routines; IRQ/FIQ are
 * masked only around each CP15 range operation pair.
 */
ENTRY(sheeva_l2cache_wbinv_range)
	str	lr, [sp, #-4]!
	mrs	lr, cpsr		/* lr = saved PSR, restored per chunk */
	/* Start with cache line aligned address */
	ldr	ip, .Lsheeva_cache_line_size
	ldr	ip, [ip]		/* ip = cache line size */
	sub	ip, ip, #1		/* ip = line mask */
	and	r2, r0, ip		/* r2 = va offset within its line */
	add	r1, r1, r2		/* grow len to cover line start */
	add	r1, r1, ip
	bics	r1, r1, ip		/* round len up to whole lines */
	bics	r0, r0, ip		/* round va down to line start */

	ldr	ip, .Lsheeva_asm_page_mask	/* ip = PAGE_MASK constant */
	and	r2, r0, ip		/* r2 = va offset within its page */
	rsb	r2, r2, #PAGE_SIZE	/* r2 = bytes to next page boundary */
	cmp	r1, r2
	movcc	ip, r1			/* ip = chunk = min(len, r2) */
	movcs	ip, r2
1:
	add	r3, r0, ip
	sub	r2, r3, #1		/* r2 = address of last byte in chunk */
	/* Disable irqs */
	orr	r3, lr, #I32_bit | F32_bit
	msr	cpsr_c, r3
	mcr	p15, 1, r0, c15, c9, 4	/* Clean L2 zone start address */
	mcr	p15, 1, r2, c15, c9, 5	/* Clean L2 zone end address */
	mcr	p15, 1, r0, c15, c11, 4	/* Inv L2 zone start address */
	mcr	p15, 1, r2, c15, c11, 5	/* Inv L2 zone end address */
	/* Enable irqs */
	msr	cpsr_c, lr

	add	r0, r0, ip		/* advance past this chunk */
	sub	r1, r1, ip		/* len -= chunk */
	cmp	r1, #PAGE_SIZE		/* next chunk = min(len, PAGE_SIZE) */
	movcc	ip, r1
	movcs	ip, #PAGE_SIZE
	cmp	r1, #0
	bne	1b			/* more to do? */
	mov	r0, #0
	mcr	p15, 0, r0, c7, c10, 4	/* drain the write buffer */
	ldr	lr, [sp], #4
	RET
END(sheeva_l2cache_wbinv_range)
292
/*
 * void sheeva_l2cache_inv_range(vm_offset_t va, vm_size_t len)
 *
 * Invalidate (without write-back) the L2 cache over [va, va + len).
 * In:      r0 = start virtual address, r1 = length in bytes
 * Clobbers r2, r3, ip, flags; lr is saved/restored on the stack.
 *
 * NOTE(review): as with the L1 variant, edge lines only partially
 * covered by the range are invalidated without being cleaned.
 * Same per-page chunking as the other range routines; IRQ/FIQ are
 * masked only around each CP15 range operation.
 */
ENTRY(sheeva_l2cache_inv_range)
	str	lr, [sp, #-4]!
	mrs	lr, cpsr		/* lr = saved PSR, restored per chunk */
	/* Start with cache line aligned address */
	ldr	ip, .Lsheeva_cache_line_size
	ldr	ip, [ip]		/* ip = cache line size */
	sub	ip, ip, #1		/* ip = line mask */
	and	r2, r0, ip		/* r2 = va offset within its line */
	add	r1, r1, r2		/* grow len to cover line start */
	add	r1, r1, ip
	bics	r1, r1, ip		/* round len up to whole lines */
	bics	r0, r0, ip		/* round va down to line start */

	ldr	ip, .Lsheeva_asm_page_mask	/* ip = PAGE_MASK constant */
	and	r2, r0, ip		/* r2 = va offset within its page */
	rsb	r2, r2, #PAGE_SIZE	/* r2 = bytes to next page boundary */
	cmp	r1, r2
	movcc	ip, r1			/* ip = chunk = min(len, r2) */
	movcs	ip, r2
1:
	add	r3, r0, ip
	sub	r2, r3, #1		/* r2 = address of last byte in chunk */
	/* Disable irqs */
	orr	r3, lr, #I32_bit | F32_bit
	msr	cpsr_c, r3
	mcr	p15, 1, r0, c15, c11, 4	/* Inv L2 zone start address */
	mcr	p15, 1, r2, c15, c11, 5	/* Inv L2 zone end address */
	/* Enable irqs */
	msr	cpsr_c, lr

	add	r0, r0, ip		/* advance past this chunk */
	sub	r1, r1, ip		/* len -= chunk */
	cmp	r1, #PAGE_SIZE		/* next chunk = min(len, PAGE_SIZE) */
	movcc	ip, r1
	movcs	ip, #PAGE_SIZE
	cmp	r1, #0
	bne	1b			/* more to do? */
	mov	r0, #0
	mcr	p15, 0, r0, c7, c10, 4	/* drain the write buffer */
	ldr	lr, [sp], #4
	RET
END(sheeva_l2cache_inv_range)
335
/*
 * void sheeva_l2cache_wb_range(vm_offset_t va, vm_size_t len)
 *
 * Write back (clean, without invalidating) the L2 cache over
 * [va, va + len).
 * In:      r0 = start virtual address, r1 = length in bytes
 * Clobbers r2, r3, ip, flags; lr is saved/restored on the stack.
 *
 * Same per-page chunking as the other range routines; IRQ/FIQ are
 * masked only around each CP15 range operation.
 */
ENTRY(sheeva_l2cache_wb_range)
	str	lr, [sp, #-4]!
	mrs	lr, cpsr		/* lr = saved PSR, restored per chunk */
	/* Start with cache line aligned address */
	ldr	ip, .Lsheeva_cache_line_size
	ldr	ip, [ip]		/* ip = cache line size */
	sub	ip, ip, #1		/* ip = line mask */
	and	r2, r0, ip		/* r2 = va offset within its line */
	add	r1, r1, r2		/* grow len to cover line start */
	add	r1, r1, ip
	bics	r1, r1, ip		/* round len up to whole lines */
	bics	r0, r0, ip		/* round va down to line start */

	ldr	ip, .Lsheeva_asm_page_mask	/* ip = PAGE_MASK constant */
	and	r2, r0, ip		/* r2 = va offset within its page */
	rsb	r2, r2, #PAGE_SIZE	/* r2 = bytes to next page boundary */
	cmp	r1, r2
	movcc	ip, r1			/* ip = chunk = min(len, r2) */
	movcs	ip, r2
1:
	add	r3, r0, ip
	sub	r2, r3, #1		/* r2 = address of last byte in chunk */
	/* Disable irqs */
	orr	r3, lr, #I32_bit | F32_bit
	msr	cpsr_c, r3
	mcr	p15, 1, r0, c15, c9, 4	/* Clean L2 zone start address */
	mcr	p15, 1, r2, c15, c9, 5	/* Clean L2 zone end address */
	/* Enable irqs */
	msr	cpsr_c, lr

	add	r0, r0, ip		/* advance past this chunk */
	sub	r1, r1, ip		/* len -= chunk */
	cmp	r1, #PAGE_SIZE		/* next chunk = min(len, PAGE_SIZE) */
	movcc	ip, r1
	movcs	ip, #PAGE_SIZE
	cmp	r1, #0
	bne	1b			/* more to do? */
	mov	r0, #0
	mcr	p15, 0, r0, c7, c10, 4	/* drain the write buffer */
	ldr	lr, [sp], #4
	RET
END(sheeva_l2cache_wb_range)
378
/*
 * void sheeva_l2cache_wbinv_all(void)
 *
 * Clean and invalidate the entire L2 cache via the Sheeva-specific
 * CP15 c15 whole-cache operations, with IRQ/FIQ masked for the
 * duration, then drain the write buffer.
 * Clobbers r0, r1, r2, flags.
 */
ENTRY(sheeva_l2cache_wbinv_all)
	/* Disable irqs */
	mrs	r1, cpsr		/* r1 = saved PSR */
	orr	r2, r1, #I32_bit | F32_bit
	msr	cpsr_c, r2

	mov	r0, #0
	mcr	p15, 1, r0, c15, c9, 0	/* Clean L2 */
	mcr	p15, 1, r0, c15, c11, 0	/* Invalidate L2 */

	msr	cpsr_c, r1		/* Reenable irqs */

	mcr	p15, 0, r0, c7, c10, 4	/* drain the write buffer */
	RET
END(sheeva_l2cache_wbinv_all)
394
/*
 * u_int sheeva_control_ext(u_int clear, u_int set)
 *
 * Read-modify-write the Sheeva extra features register
 * (CP15 p15, 1, c15, c1, 0).  r0 = mask of bits to affect,
 * r1 = bit values to apply.  Per bit, the combination of the two
 * arguments has this effect on the register bit:
 *
 *	r0-bit	r1-bit	EFFECT (bit value saved into register)
 *	  0	  0	not changed
 *	  0	  1	negated
 *	  1	  0	cleared
 *	  1	  1	set
 *
 * The register is written back only if the value actually changed.
 * Returns the OLD register value in r0.  Clobbers r2, r3, flags.
 */
ENTRY(sheeva_control_ext)
	mrc	p15, 1, r3, c15, c1, 0	/* Read the control register */
	bic	r2, r3, r0		/* Clear bits */
	eor	r2, r2, r1		/* XOR bits */

	teq	r2, r3			/* Only write if there is a change */
	mcrne	p15, 1, r2, c15, c1, 0	/* Write new control register */
	mov	r0, r3			/* Return old value */
	RET
END(sheeva_control_ext)
413
/*
 * void sheeva_cpu_sleep(int mode)
 *
 * Idle the CPU: drain the write buffer, then execute the CP15
 * wait-for-interrupt operation; execution resumes here on wakeup.
 * The argument in r0 is ignored (overwritten with 0).  Clobbers r0.
 */
ENTRY(sheeva_cpu_sleep)
	mov	r0, #0
	mcr	p15, 0, r0, c7, c10, 4	/* Drain write buffer */
	mcr	p15, 0, r0, c7, c0, 4	/* Wait for interrupt */
	mov	pc, lr
END(sheeva_cpu_sleep)
420
/* Cache object: ca1eede8a9ef214b8872e80b76a7e0c0 */