/* $NetBSD: cpufunc_asm_arm9.S,v 1.3 2004/01/26 15:54:16 rearnsha Exp $ */

/*
 * Copyright (c) 2001, 2004 ARM Limited
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. The name of the company may not be used to endorse or promote
 *    products derived from this software without specific prior written
 *    permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR IMPLIED
 * WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF
 * MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
 * IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT,
 * INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
 * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
 * SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 * ARM9 assembly functions for CPU / MMU / TLB specific operations
 */

#include <machine/asm.h>
__FBSDID("$FreeBSD$");

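/*
 * For reference, the C prototypes of the entry points below, roughly
 * as declared in <machine/cpufunc.h> (the exact argument types in the
 * header may differ):
 *
 *	void	arm9_setttb(u_int ttb);
 *	void	arm9_tlb_flushID_SE(u_int va);
 *	void	arm9_icache_sync_all(void);
 *	void	arm9_icache_sync_range(vm_offset_t va, vm_size_t size);
 *	void	arm9_dcache_wbinv_all(void);
 *	void	arm9_dcache_wbinv_range(vm_offset_t va, vm_size_t size);
 *	void	arm9_dcache_inv_range(vm_offset_t va, vm_size_t size);
 *	void	arm9_dcache_wb_range(vm_offset_t va, vm_size_t size);
 *	void	arm9_idcache_wbinv_all(void);
 *	void	arm9_idcache_wbinv_range(vm_offset_t va, vm_size_t size);
 *	void	arm9_context_switch(void);
 */
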
/*
 * Functions to set the MMU Translation Table Base register
 *
 * We need to clean and flush the cache as it uses virtual
 * addresses that are about to change.
 */
ENTRY(arm9_setttb)
	stmfd	sp!, {r0, lr}
	bl	_C_LABEL(arm9_idcache_wbinv_all)
	ldmfd	sp!, {r0, lr}

	mcr	p15, 0, r0, c2, c0, 0	/* load new TTB */

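	/*
	 * The TLBs may still hold translations walked from the old
	 * table, so invalidate them as well.
	 */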
	mcr	p15, 0, r0, c8, c7, 0	/* invalidate I+D TLBs */
	mov	pc, lr

/*
 * TLB functions
 */
ENTRY(arm9_tlb_flushID_SE)
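	/* r0 = virtual address of the single entry to flush */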
	mcr	p15, 0, r0, c8, c6, 1	/* flush D tlb single entry */
	mcr	p15, 0, r0, c8, c5, 1	/* flush I tlb single entry */
	mov	pc, lr

/*
 * Cache operations.  For the entire cache we use the set/index
 * operations.
 */
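/*
 * The four loop parameters aliased below are loaded with a single
 * ldmia from the arm9_dcache_* variables declared at the bottom of
 * this file.  They are filled in at boot by the CPU identification
 * code from the CP15 cache type register (see set_cpufuncs() in
 * cpufunc.c).
 */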
s_max	.req r0
i_max	.req r1
s_inc	.req r2
i_inc	.req r3

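/*
 * Sync a range of the I-cache with the D-cache.  For ranges of 16KB
 * (0x4000) or more it is cheaper to operate on the whole cache, so we
 * fall into arm9_icache_sync_all instead; the threshold presumably
 * matches the largest D-cache this code expects.  Otherwise round the
 * start down to a cache-line boundary (the line size is read at run
 * time from arm_pdcache_line_size), grow the length to cover the same
 * bytes, and work line by line.  All of the _range operations below
 * follow this same pattern.
 */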
ENTRY_NP(arm9_icache_sync_range)
	ldr	ip, .Larm9_line_size
	cmp	r1, #0x4000
	bcs	.Larm9_icache_sync_all
	ldr	ip, [ip]
	sub	r3, ip, #1
	and	r2, r0, r3
	add	r1, r1, r2
	bic	r0, r0, r3
.Larm9_sync_next:
	mcr	p15, 0, r0, c7, c5, 1	/* Invalidate I cache SE with VA */
	mcr	p15, 0, r0, c7, c10, 1	/* Clean D cache SE with VA */
	add	r0, r0, ip
	subs	r1, r1, ip
	bpl	.Larm9_sync_next
	mov	pc, lr

ENTRY_NP(arm9_icache_sync_all)
.Larm9_icache_sync_all:
	/*
	 * We assume that the code here can never be out of sync with the
	 * dcache, so that we can safely flush the Icache and fall through
	 * into the Dcache cleaning code.
	 */
	mcr	p15, 0, r0, c7, c5, 0	/* Flush I cache */
	/* Fall through to clean Dcache. */

.Larm9_dcache_wb:
	ldr	ip, .Larm9_cache_data
	ldmia	ip, {s_max, i_max, s_inc, i_inc}
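	/*
	 * ip walks the combined set/index value, starting at the
	 * maximum of both fields.  The inner loop cleans one line per
	 * step and moves the index field down by i_inc until it reaches
	 * zero; the index-0 line is then cleaned and the set field
	 * stepped down, until every set has been visited.  The purge
	 * loop in arm9_dcache_wbinv_all below is structured the same
	 * way.
	 */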
.Lnext_set:
	orr	ip, s_max, i_max
.Lnext_index:
	mcr	p15, 0, ip, c7, c10, 2	/* Clean D cache SE with Set/Index */
	sub	ip, ip, i_inc
	tst	ip, i_max		/* Index 0 is last one */
	bne	.Lnext_index		/* Next index */
	mcr	p15, 0, ip, c7, c10, 2	/* Clean D cache SE with Set/Index */
	subs	s_max, s_max, s_inc
	bpl	.Lnext_set		/* Next set */
	mov	pc, lr

.Larm9_line_size:
	.word	_C_LABEL(arm_pdcache_line_size)

ENTRY(arm9_dcache_wb_range)
	ldr	ip, .Larm9_line_size
	cmp	r1, #0x4000
	bcs	.Larm9_dcache_wb
	ldr	ip, [ip]
	sub	r3, ip, #1
	and	r2, r0, r3
	add	r1, r1, r2
	bic	r0, r0, r3
.Larm9_wb_next:
	mcr	p15, 0, r0, c7, c10, 1	/* Clean D cache SE with VA */
	add	r0, r0, ip
	subs	r1, r1, ip
	bpl	.Larm9_wb_next
	mov	pc, lr

ENTRY(arm9_dcache_wbinv_range)
	ldr	ip, .Larm9_line_size
	cmp	r1, #0x4000
	bcs	.Larm9_dcache_wbinv_all
	ldr	ip, [ip]
	sub	r3, ip, #1
	and	r2, r0, r3
	add	r1, r1, r2
	bic	r0, r0, r3
.Larm9_wbinv_next:
	mcr	p15, 0, r0, c7, c14, 1	/* Purge D cache SE with VA */
	add	r0, r0, ip
	subs	r1, r1, ip
	bpl	.Larm9_wbinv_next
	mov	pc, lr

/*
 * Note that we must not invalidate the entire cache here: blindly
 * invalidating everything would discard dirty data belonging to other
 * mappings.  If the range is too big to handle line by line, we
 * therefore write back and invalidate (wb-inv) the entire cache
 * instead.
 */
ENTRY(arm9_dcache_inv_range)
	ldr	ip, .Larm9_line_size
	cmp	r1, #0x4000
	bcs	.Larm9_dcache_wbinv_all
	ldr	ip, [ip]
	sub	r3, ip, #1
	and	r2, r0, r3
	add	r1, r1, r2
	bic	r0, r0, r3
.Larm9_inv_next:
	mcr	p15, 0, r0, c7, c6, 1	/* Invalidate D cache SE with VA */
	add	r0, r0, ip
	subs	r1, r1, ip
	bpl	.Larm9_inv_next
	mov	pc, lr

ENTRY(arm9_idcache_wbinv_range)
	ldr	ip, .Larm9_line_size
	cmp	r1, #0x4000
	bcs	.Larm9_idcache_wbinv_all
	ldr	ip, [ip]
	sub	r3, ip, #1
	and	r2, r0, r3
	add	r1, r1, r2
	bic	r0, r0, r3
.Larm9_id_wbinv_next:
	mcr	p15, 0, r0, c7, c5, 1	/* Invalidate I cache SE with VA */
	mcr	p15, 0, r0, c7, c14, 1	/* Purge D cache SE with VA */
	add	r0, r0, ip
	subs	r1, r1, ip
	bpl	.Larm9_id_wbinv_next
	mov	pc, lr

ENTRY_NP(arm9_idcache_wbinv_all)
.Larm9_idcache_wbinv_all:
	/*
	 * We assume that the code here can never be out of sync with the
	 * dcache, so that we can safely flush the Icache and fall through
	 * into the Dcache purging code.
	 */
	mcr	p15, 0, r0, c7, c5, 0	/* Flush I cache */
	/* Fall through */

ENTRY(arm9_dcache_wbinv_all)
.Larm9_dcache_wbinv_all:
	ldr	ip, .Larm9_cache_data
	ldmia	ip, {s_max, i_max, s_inc, i_inc}
.Lnext_set_inv:
	orr	ip, s_max, i_max
.Lnext_index_inv:
	mcr	p15, 0, ip, c7, c14, 2	/* Purge D cache SE with Set/Index */
	sub	ip, ip, i_inc
	tst	ip, i_max		/* Index 0 is last one */
	bne	.Lnext_index_inv	/* Next index */
	mcr	p15, 0, ip, c7, c14, 2	/* Purge D cache SE with Set/Index */
	subs	s_max, s_max, s_inc
	bpl	.Lnext_set_inv		/* Next set */
	mov	pc, lr

.Larm9_cache_data:
	.word	_C_LABEL(arm9_dcache_sets_max)

/*
 * Context switch.
 *
 * This is the CPU-specific part of the context switcher, cpu_switch().
 * It is the function that actually performs the TTB reload.
 *
 * NOTE: Special calling convention
 *	r1, r4-r13 must be preserved
 */
ENTRY(arm9_context_switch)
	/*
	 * We can assume that the caches will only contain kernel addresses
	 * at this point.  So no need to flush them again.
	 */
	mcr	p15, 0, r0, c7, c10, 4	/* drain the write buffer */
	mcr	p15, 0, r0, c2, c0, 0	/* set the new TTB */
	mcr	p15, 0, r0, c8, c7, 0	/* and flush the I+D tlbs */

	/* Paranoia -- make sure the pipeline is empty. */
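	/* (CP15 writes can take a few cycles to affect instruction fetch.) */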
	nop
	nop
	nop
	mov	pc, lr

	.bss

/* XXX The following macros should probably be moved to asm.h */
#define _DATA_OBJECT(x) .globl x; .type x,_ASM_TYPE_OBJECT; x:
#define C_OBJECT(x)	_DATA_OBJECT(_C_LABEL(x))

/*
 * Parameters for the cache cleaning code.  The code above assumes
 * that these four variables are laid out in exactly this order, which
 * is why they are declared here in the assembler file rather than
 * in C.
 */
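/* Loaded above with a single ldmia into {s_max, i_max, s_inc, i_inc}. */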
	.align	0
C_OBJECT(arm9_dcache_sets_max)
	.space	4
C_OBJECT(arm9_dcache_index_max)
	.space	4
C_OBJECT(arm9_dcache_sets_inc)
	.space	4
C_OBJECT(arm9_dcache_index_inc)
	.space	4