/*-
 * Copyright (C) 2008 MARVELL INTERNATIONAL LTD.
 * All rights reserved.
 *
 * Developed by Semihalf.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. Neither the name of MARVELL nor the names of contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY AUTHOR AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED. IN NO EVENT SHALL AUTHOR OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 */

#include <machine/armreg.h>
#include <machine/asm.h>
__FBSDID("$FreeBSD: releng/10.2/sys/arm/arm/cpufunc_asm_sheeva.S 278613 2015-02-12 03:50:33Z ian $");

#include <machine/param.h>
37
/*
 * Literal pool: addresses of kernel variables, loaded pc-relative by the
 * routines below.  arm_pdcache_line_size is the primary D-cache line size
 * in bytes; PAGE_MASK is the page-offset mask (both set up by early MD
 * initialization — assumed initialized before any of these are called).
 */
.Lsheeva_cache_line_size:
	.word _C_LABEL(arm_pdcache_line_size)
.Lsheeva_asm_page_mask:
	.word _C_LABEL(PAGE_MASK)
42
/*
 * void sheeva_setttb(u_int ttb)
 *
 * Install a new translation table base.  The caches (L1 I/D and L2) are
 * flushed first, with interrupts disabled, so no stale or dirty lines
 * refer to mappings of the outgoing table; the I+D TLBs are invalidated
 * after the new base is loaded.
 *
 * In:     r0 = new L1 translation table base
 * Clobb:  r1-r3, flags
 */
ENTRY(sheeva_setttb)
	/* Disable irqs so the whole-cache flush is not interleaved */
	mrs	r2, cpsr
	orr	r3, r2, #PSR_I | PSR_F
	msr	cpsr_c, r3

	mov	r1, #0
	mcr	p15, 0, r1, c7, c5, 0	/* Invalidate ICache */
1:	mrc	p15, 0, r15, c7, c14, 3	/* Test, clean and invalidate DCache */
	bne	1b			/* More to do? (loops until clean) */

	mcr	p15, 1, r1, c15, c9, 0	/* Clean L2 */
	mcr	p15, 1, r1, c15, c11, 0	/* Invalidate L2 */

	/* Reenable irqs */
	msr	cpsr_c, r2

	mcr	p15, 0, r1, c7, c10, 4	/* drain the write buffer */

	mcr	p15, 0, r0, c2, c0, 0	/* load new TTB */

	mcr	p15, 0, r0, c8, c7, 0	/* invalidate I+D TLBs */
	RET
END(sheeva_setttb)
67
/*
 * void sheeva_dcache_wbinv_range(vm_offset_t va, vm_size_t len)
 *
 * Write back and invalidate the D-cache over [va, va + len).  The range
 * is widened to whole cache lines, then processed in chunks of at most
 * one page using the Sheeva CP15 range operations; interrupts are
 * disabled only around each chunk so interrupt latency stays bounded.
 *
 * In:     r0 = start VA, r1 = length in bytes
 * Clobb:  r0-r3, ip, flags
 */
ENTRY(sheeva_dcache_wbinv_range)
	str	lr, [sp, #-4]!		/* lr is used to hold saved cpsr */
	mrs	lr, cpsr
	/* Start with cache line aligned address */
	ldr	ip, .Lsheeva_cache_line_size
	ldr	ip, [ip]
	sub	ip, ip, #1		/* ip = cache-line offset mask */
	and	r2, r0, ip		/* r2 = va's offset into its line */
	add	r1, r1, r2		/* grow len to cover the partial line */
	add	r1, r1, ip
	bics	r1, r1, ip		/* round len up to whole lines */
	bics	r0, r0, ip		/* align start down to a line */

	ldr	ip, .Lsheeva_asm_page_mask
	and	r2, r0, ip
	rsb	r2, r2, #PAGE_SIZE	/* r2 = bytes to the end of this page */
	cmp	r1, r2
	movcc	ip, r1			/* ip = chunk = min(len, r2) */
	movcs	ip, r2
1:
	add	r3, r0, ip
	sub	r2, r3, #1		/* r2 = last byte addr of this chunk */
	/* Disable irqs */
	orr	r3, lr, #PSR_I | PSR_F
	msr	cpsr_c, r3
	mcr	p15, 5, r0, c15, c15, 0	/* Clean and inv zone start address */
	mcr	p15, 5, r2, c15, c15, 1	/* Clean and inv zone end address */
	/* Enable irqs */
	msr	cpsr_c, lr

	add	r0, r0, ip		/* advance past the chunk */
	sub	r1, r1, ip		/* r1 = bytes remaining */
	cmp	r1, #PAGE_SIZE		/* next chunk = min(remaining, page) */
	movcc	ip, r1
	movcs	ip, #PAGE_SIZE
	cmp	r1, #0
	bne	1b			/* loop until nothing remains */
	mov	r0, #0
	mcr	p15, 0, r0, c7, c10, 4	/* drain the write buffer */
	ldr	lr, [sp], #4
	RET
END(sheeva_dcache_wbinv_range)
110
/*
 * void sheeva_idcache_wbinv_range(vm_offset_t va, vm_size_t len)
 *
 * Write back and invalidate the D-cache, and invalidate the I-cache,
 * over [va, va + len).  Like sheeva_dcache_wbinv_range the work is done
 * in page-sized chunks; each chunk is first cleaned+invalidated in the
 * D-cache with the CP15 range ops (irqs disabled), then the I-cache is
 * invalidated line by line with irqs enabled.
 *
 * In:     r0 = start VA, r1 = length in bytes
 * Clobb:  r0-r3, ip, flags
 */
ENTRY(sheeva_idcache_wbinv_range)
	str	lr, [sp, #-4]!		/* lr is used to hold saved cpsr */
	mrs	lr, cpsr
	/* Start with cache line aligned address */
	ldr	ip, .Lsheeva_cache_line_size
	ldr	ip, [ip]
	sub	ip, ip, #1		/* ip = cache-line offset mask */
	and	r2, r0, ip		/* r2 = va's offset into its line */
	add	r1, r1, r2		/* grow len to cover the partial line */
	add	r1, r1, ip
	bics	r1, r1, ip		/* round len up to whole lines */
	bics	r0, r0, ip		/* align start down to a line */

	ldr	ip, .Lsheeva_asm_page_mask
	and	r2, r0, ip
	rsb	r2, r2, #PAGE_SIZE	/* r2 = bytes to the end of this page */
	cmp	r1, r2
	movcc	ip, r1			/* ip = chunk = min(len, r2) */
	movcs	ip, r2
1:
	add	r3, r0, ip
	sub	r2, r3, #1		/* r2 = last byte addr of this chunk */
	/* Disable irqs */
	orr	r3, lr, #PSR_I | PSR_F
	msr	cpsr_c, r3
	mcr	p15, 5, r0, c15, c15, 0	/* Clean and inv zone start address */
	mcr	p15, 5, r2, c15, c15, 1	/* Clean and inv zone end address */
	/* Enable irqs */
	msr	cpsr_c, lr

	/* Invalidate and clean icache line by line */
	ldr	r3, .Lsheeva_cache_line_size
	ldr	r3, [r3]		/* r3 = cache line size (stride) */
2:
	mcr	p15, 0, r0, c7, c5, 1	/* invalidate I-cache line at r0 */
	add	r0, r0, r3
	cmp	r2, r0			/* past the chunk's last byte yet? */
	bhi	2b

	add	r0, r2, #1		/* advance past the chunk */
	sub	r1, r1, ip		/* r1 = bytes remaining */
	cmp	r1, #PAGE_SIZE		/* next chunk = min(remaining, page) */
	movcc	ip, r1
	movcs	ip, #PAGE_SIZE
	cmp	r1, #0
	bne	1b			/* loop until nothing remains */
	mov	r0, #0
	mcr	p15, 0, r0, c7, c10, 4	/* drain the write buffer */
	ldr	lr, [sp], #4
	RET
END(sheeva_idcache_wbinv_range)
162
/*
 * void sheeva_dcache_inv_range(vm_offset_t va, vm_size_t len)
 *
 * Invalidate (without write-back) the D-cache over [va, va + len),
 * widened to whole cache lines and processed in page-sized chunks with
 * irqs disabled around each CP15 range operation.
 *
 * NOTE(review): partial lines at either edge of the range are discarded
 * too — callers are expected to pass cache-line-aligned ranges or accept
 * loss of adjacent dirty data (standard contract for inv_range).
 *
 * In:     r0 = start VA, r1 = length in bytes
 * Clobb:  r0-r3, ip, flags
 */
ENTRY(sheeva_dcache_inv_range)
	str	lr, [sp, #-4]!		/* lr is used to hold saved cpsr */
	mrs	lr, cpsr
	/* Start with cache line aligned address */
	ldr	ip, .Lsheeva_cache_line_size
	ldr	ip, [ip]
	sub	ip, ip, #1		/* ip = cache-line offset mask */
	and	r2, r0, ip		/* r2 = va's offset into its line */
	add	r1, r1, r2		/* grow len to cover the partial line */
	add	r1, r1, ip
	bics	r1, r1, ip		/* round len up to whole lines */
	bics	r0, r0, ip		/* align start down to a line */

	ldr	ip, .Lsheeva_asm_page_mask
	and	r2, r0, ip
	rsb	r2, r2, #PAGE_SIZE	/* r2 = bytes to the end of this page */
	cmp	r1, r2
	movcc	ip, r1			/* ip = chunk = min(len, r2) */
	movcs	ip, r2
1:
	add	r3, r0, ip
	sub	r2, r3, #1		/* r2 = last byte addr of this chunk */
	/* Disable irqs */
	orr	r3, lr, #PSR_I | PSR_F
	msr	cpsr_c, r3
	mcr	p15, 5, r0, c15, c14, 0	/* Inv zone start address */
	mcr	p15, 5, r2, c15, c14, 1	/* Inv zone end address */
	/* Enable irqs */
	msr	cpsr_c, lr

	add	r0, r0, ip		/* advance past the chunk */
	sub	r1, r1, ip		/* r1 = bytes remaining */
	cmp	r1, #PAGE_SIZE		/* next chunk = min(remaining, page) */
	movcc	ip, r1
	movcs	ip, #PAGE_SIZE
	cmp	r1, #0
	bne	1b			/* loop until nothing remains */
	mov	r0, #0
	mcr	p15, 0, r0, c7, c10, 4	/* drain the write buffer */
	ldr	lr, [sp], #4
	RET
END(sheeva_dcache_inv_range)
205
/*
 * void sheeva_dcache_wb_range(vm_offset_t va, vm_size_t len)
 *
 * Write back (clean, without invalidating) the D-cache over
 * [va, va + len), widened to whole cache lines and processed in
 * page-sized chunks with irqs disabled around each CP15 range op.
 *
 * In:     r0 = start VA, r1 = length in bytes
 * Clobb:  r0-r3, ip, flags
 */
ENTRY(sheeva_dcache_wb_range)
	str	lr, [sp, #-4]!		/* lr is used to hold saved cpsr */
	mrs	lr, cpsr
	/* Start with cache line aligned address */
	ldr	ip, .Lsheeva_cache_line_size
	ldr	ip, [ip]
	sub	ip, ip, #1		/* ip = cache-line offset mask */
	and	r2, r0, ip		/* r2 = va's offset into its line */
	add	r1, r1, r2		/* grow len to cover the partial line */
	add	r1, r1, ip
	bics	r1, r1, ip		/* round len up to whole lines */
	bics	r0, r0, ip		/* align start down to a line */

	ldr	ip, .Lsheeva_asm_page_mask
	and	r2, r0, ip
	rsb	r2, r2, #PAGE_SIZE	/* r2 = bytes to the end of this page */
	cmp	r1, r2
	movcc	ip, r1			/* ip = chunk = min(len, r2) */
	movcs	ip, r2
1:
	add	r3, r0, ip
	sub	r2, r3, #1		/* r2 = last byte addr of this chunk */
	/* Disable irqs */
	orr	r3, lr, #PSR_I | PSR_F
	msr	cpsr_c, r3
	mcr	p15, 5, r0, c15, c13, 0	/* Clean zone start address */
	mcr	p15, 5, r2, c15, c13, 1	/* Clean zone end address */
	/* Enable irqs */
	msr	cpsr_c, lr

	add	r0, r0, ip		/* advance past the chunk */
	sub	r1, r1, ip		/* r1 = bytes remaining */
	cmp	r1, #PAGE_SIZE		/* next chunk = min(remaining, page) */
	movcc	ip, r1
	movcs	ip, #PAGE_SIZE
	cmp	r1, #0
	bne	1b			/* loop until nothing remains */
	mov	r0, #0
	mcr	p15, 0, r0, c7, c10, 4	/* drain the write buffer */
	ldr	lr, [sp], #4
	RET
END(sheeva_dcache_wb_range)
248
/*
 * void sheeva_l2cache_wbinv_range(vm_offset_t va, vm_size_t len)
 *
 * Write back and invalidate the L2 cache over [va, va + len).  On
 * Sheeva there is no combined L2 clean+invalidate range op, so each
 * chunk is cleaned first and then invalidated, both under the same
 * irq-disabled window.  Chunking and alignment follow the same
 * page-at-a-time pattern as the L1 range routines above.
 *
 * In:     r0 = start VA, r1 = length in bytes
 * Clobb:  r0-r3, ip, flags
 */
ENTRY(sheeva_l2cache_wbinv_range)
	str	lr, [sp, #-4]!		/* lr is used to hold saved cpsr */
	mrs	lr, cpsr
	/* Start with cache line aligned address */
	ldr	ip, .Lsheeva_cache_line_size
	ldr	ip, [ip]
	sub	ip, ip, #1		/* ip = cache-line offset mask */
	and	r2, r0, ip		/* r2 = va's offset into its line */
	add	r1, r1, r2		/* grow len to cover the partial line */
	add	r1, r1, ip
	bics	r1, r1, ip		/* round len up to whole lines */
	bics	r0, r0, ip		/* align start down to a line */

	ldr	ip, .Lsheeva_asm_page_mask
	and	r2, r0, ip
	rsb	r2, r2, #PAGE_SIZE	/* r2 = bytes to the end of this page */
	cmp	r1, r2
	movcc	ip, r1			/* ip = chunk = min(len, r2) */
	movcs	ip, r2
1:
	add	r3, r0, ip
	sub	r2, r3, #1		/* r2 = last byte addr of this chunk */
	/* Disable irqs */
	orr	r3, lr, #PSR_I | PSR_F
	msr	cpsr_c, r3
	mcr	p15, 1, r0, c15, c9, 4	/* Clean L2 zone start address */
	mcr	p15, 1, r2, c15, c9, 5	/* Clean L2 zone end address */
	mcr	p15, 1, r0, c15, c11, 4	/* Inv L2 zone start address */
	mcr	p15, 1, r2, c15, c11, 5	/* Inv L2 zone end address */
	/* Enable irqs */
	msr	cpsr_c, lr

	add	r0, r0, ip		/* advance past the chunk */
	sub	r1, r1, ip		/* r1 = bytes remaining */
	cmp	r1, #PAGE_SIZE		/* next chunk = min(remaining, page) */
	movcc	ip, r1
	movcs	ip, #PAGE_SIZE
	cmp	r1, #0
	bne	1b			/* loop until nothing remains */
	mov	r0, #0
	mcr	p15, 0, r0, c7, c10, 4	/* drain the write buffer */
	ldr	lr, [sp], #4
	RET
END(sheeva_l2cache_wbinv_range)
293
/*
 * void sheeva_l2cache_inv_range(vm_offset_t va, vm_size_t len)
 *
 * Invalidate (without write-back) the L2 cache over [va, va + len),
 * widened to whole cache lines and processed in page-sized chunks with
 * irqs disabled around each CP15 range operation.
 *
 * In:     r0 = start VA, r1 = length in bytes
 * Clobb:  r0-r3, ip, flags
 */
ENTRY(sheeva_l2cache_inv_range)
	str	lr, [sp, #-4]!		/* lr is used to hold saved cpsr */
	mrs	lr, cpsr
	/* Start with cache line aligned address */
	ldr	ip, .Lsheeva_cache_line_size
	ldr	ip, [ip]
	sub	ip, ip, #1		/* ip = cache-line offset mask */
	and	r2, r0, ip		/* r2 = va's offset into its line */
	add	r1, r1, r2		/* grow len to cover the partial line */
	add	r1, r1, ip
	bics	r1, r1, ip		/* round len up to whole lines */
	bics	r0, r0, ip		/* align start down to a line */

	ldr	ip, .Lsheeva_asm_page_mask
	and	r2, r0, ip
	rsb	r2, r2, #PAGE_SIZE	/* r2 = bytes to the end of this page */
	cmp	r1, r2
	movcc	ip, r1			/* ip = chunk = min(len, r2) */
	movcs	ip, r2
1:
	add	r3, r0, ip
	sub	r2, r3, #1		/* r2 = last byte addr of this chunk */
	/* Disable irqs */
	orr	r3, lr, #PSR_I | PSR_F
	msr	cpsr_c, r3
	mcr	p15, 1, r0, c15, c11, 4	/* Inv L2 zone start address */
	mcr	p15, 1, r2, c15, c11, 5	/* Inv L2 zone end address */
	/* Enable irqs */
	msr	cpsr_c, lr

	add	r0, r0, ip		/* advance past the chunk */
	sub	r1, r1, ip		/* r1 = bytes remaining */
	cmp	r1, #PAGE_SIZE		/* next chunk = min(remaining, page) */
	movcc	ip, r1
	movcs	ip, #PAGE_SIZE
	cmp	r1, #0
	bne	1b			/* loop until nothing remains */
	mov	r0, #0
	mcr	p15, 0, r0, c7, c10, 4	/* drain the write buffer */
	ldr	lr, [sp], #4
	RET
END(sheeva_l2cache_inv_range)
336
/*
 * void sheeva_l2cache_wb_range(vm_offset_t va, vm_size_t len)
 *
 * Write back (clean, without invalidating) the L2 cache over
 * [va, va + len), widened to whole cache lines and processed in
 * page-sized chunks with irqs disabled around each CP15 range op.
 *
 * In:     r0 = start VA, r1 = length in bytes
 * Clobb:  r0-r3, ip, flags
 */
ENTRY(sheeva_l2cache_wb_range)
	str	lr, [sp, #-4]!		/* lr is used to hold saved cpsr */
	mrs	lr, cpsr
	/* Start with cache line aligned address */
	ldr	ip, .Lsheeva_cache_line_size
	ldr	ip, [ip]
	sub	ip, ip, #1		/* ip = cache-line offset mask */
	and	r2, r0, ip		/* r2 = va's offset into its line */
	add	r1, r1, r2		/* grow len to cover the partial line */
	add	r1, r1, ip
	bics	r1, r1, ip		/* round len up to whole lines */
	bics	r0, r0, ip		/* align start down to a line */

	ldr	ip, .Lsheeva_asm_page_mask
	and	r2, r0, ip
	rsb	r2, r2, #PAGE_SIZE	/* r2 = bytes to the end of this page */
	cmp	r1, r2
	movcc	ip, r1			/* ip = chunk = min(len, r2) */
	movcs	ip, r2
1:
	add	r3, r0, ip
	sub	r2, r3, #1		/* r2 = last byte addr of this chunk */
	/* Disable irqs */
	orr	r3, lr, #PSR_I | PSR_F
	msr	cpsr_c, r3
	mcr	p15, 1, r0, c15, c9, 4	/* Clean L2 zone start address */
	mcr	p15, 1, r2, c15, c9, 5	/* Clean L2 zone end address */
	/* Enable irqs */
	msr	cpsr_c, lr

	add	r0, r0, ip		/* advance past the chunk */
	sub	r1, r1, ip		/* r1 = bytes remaining */
	cmp	r1, #PAGE_SIZE		/* next chunk = min(remaining, page) */
	movcc	ip, r1
	movcs	ip, #PAGE_SIZE
	cmp	r1, #0
	bne	1b			/* loop until nothing remains */
	mov	r0, #0
	mcr	p15, 0, r0, c7, c10, 4	/* drain the write buffer */
	ldr	lr, [sp], #4
	RET
END(sheeva_l2cache_wb_range)
379
/*
 * void sheeva_l2cache_wbinv_all(void)
 *
 * Clean and invalidate the entire L2 cache, with interrupts disabled
 * around the two CP15 whole-cache operations, then drain the write
 * buffer.
 *
 * Clobb:  r0-r2, flags
 */
ENTRY(sheeva_l2cache_wbinv_all)
	/* Disable irqs */
	mrs	r1, cpsr
	orr	r2, r1, #PSR_I | PSR_F
	msr	cpsr_c, r2

	mov	r0, #0
	mcr	p15, 1, r0, c15, c9, 0	/* Clean L2 */
	mcr	p15, 1, r0, c15, c11, 0	/* Invalidate L2 */

	msr	cpsr_c, r1		/* Reenable irqs */

	mcr	p15, 0, r0, c7, c10, 4	/* drain the write buffer */
	RET
END(sheeva_l2cache_wbinv_all)
395
/*
 * u_int sheeva_control_ext(u_int clear, u_int toggle)
 *
 * Read-modify-write the Sheeva extra-features register (CP15 c15/c1).
 * For each bit position, the effect depends on the two arguments:
 *
 *	clear	toggle	EFFECT (bit value written back)
 *	0	0	not changed
 *	0	1	negated
 *	1	0	cleared
 *	1	1	set
 *
 * The register is only written if the computed value differs from the
 * current one.
 *
 * In:     r0 = bits to clear, r1 = bits to XOR
 * Out:    r0 = previous register value
 * Clobb:  r2, r3, flags
 */
ENTRY(sheeva_control_ext)
	mrc	p15, 1, r3, c15, c1, 0	/* Read the control register */
	bic	r2, r3, r0		/* Clear bits */
	eor	r2, r2, r1		/* XOR bits */

	teq	r2, r3			/* Only write if there is a change */
	mcrne	p15, 1, r2, c15, c1, 0	/* Write new control register */
	mov	r0, r3			/* Return old value */
	RET
END(sheeva_control_ext)
414
/*
 * void sheeva_cpu_sleep(int unused)
 *
 * Idle the core: drain the write buffer, then execute the CP15
 * wait-for-interrupt operation.  Returns after wakeup.
 *
 * Clobb:  r0
 */
ENTRY(sheeva_cpu_sleep)
	mov	r0, #0
	mcr	p15, 0, r0, c7, c10, 4	/* Drain write buffer */
	mcr	p15, 0, r0, c7, c0, 4	/* Wait for interrupt */
	mov	pc, lr			/* plain return (no mode switch) */
END(sheeva_cpu_sleep)
421
/* Cache object: 8294358617dd1a29d2ae3d830a215aec */