/* $NetBSD: cache_r4k.h,v 1.10 2003/03/08 04:43:26 rafal Exp $ */

/*
 * Copyright 2001 Wasabi Systems, Inc.
 * All rights reserved.
 *
 * Written by Jason R. Thorpe for Wasabi Systems, Inc.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. All advertising materials mentioning features or use of this software
 *    must display the following acknowledgement:
 *        This product includes software developed for the NetBSD Project by
 *        Wasabi Systems, Inc.
 * 4. The name of Wasabi Systems, Inc. may not be used to endorse
 *    or promote products derived from this software without specific prior
 *    written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY WASABI SYSTEMS, INC. ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
 * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
 * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL WASABI SYSTEMS, INC
 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 * POSSIBILITY OF SUCH DAMAGE.
 *
 * $FreeBSD$
 */

/*
 * Cache definitions/operations for R4000-style caches.
 */
#define CACHE_R4K_I                     0
#define CACHE_R4K_D                     1
#define CACHE_R4K_SI                    2
#define CACHE_R4K_SD                    3

#define CACHEOP_R4K_INDEX_INV           (0 << 2)        /* I, SI */
#define CACHEOP_R4K_INDEX_WB_INV        (0 << 2)        /* D, SD */
#define CACHEOP_R4K_INDEX_LOAD_TAG      (1 << 2)        /* all */
#define CACHEOP_R4K_INDEX_STORE_TAG     (2 << 2)        /* all */
#define CACHEOP_R4K_CREATE_DIRTY_EXCL   (3 << 2)        /* D, SD */
#define CACHEOP_R4K_HIT_INV             (4 << 2)        /* all */
#define CACHEOP_R4K_HIT_WB_INV          (5 << 2)        /* D, SD */
#define CACHEOP_R4K_FILL                (5 << 2)        /* I */
#define CACHEOP_R4K_HIT_WB              (6 << 2)        /* I, D, SD */
#define CACHEOP_R4K_HIT_SET_VIRTUAL     (7 << 2)        /* SI, SD */
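
/*
 * The "op" argument taken by the macros below is the 5-bit operation
 * field of the MIPS "cache" instruction: one of the CACHEOP_R4K_*
 * operation codes (bits 4:2) OR'd with one of the CACHE_R4K_* cache
 * selectors (bits 1:0).  For example, a hit write-back-invalidate of a
 * primary data cache line is requested with the constant
 * CACHE_R4K_D|CACHEOP_R4K_HIT_WB_INV.
 */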

#if !defined(LOCORE)

/*
 * cache_op_r4k_line:
 *
 *	Perform the specified cache operation on a single line.
 */
#define cache_op_r4k_line(va, op) \
do { \
        __asm __volatile( \
                ".set noreorder \n\t" \
                "cache %1, 0(%0) \n\t" \
                ".set reorder" \
            : \
            : "r" (va), "i" (op) \
            : "memory"); \
} while (/*CONSTCOND*/0)
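
/*
 * For example, writing back and invalidating the primary data cache
 * line (if any) that holds the datum at virtual address "va" would be:
 *
 *	cache_op_r4k_line(va, CACHE_R4K_D|CACHEOP_R4K_HIT_WB_INV);
 *
 * Note that "op" must be a compile-time constant, since it is passed to
 * the cache instruction as an immediate ("i") operand.
 */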

/*
 * cache_r4k_op_8lines_16:
 *
 *	Perform the specified cache operation on 8 16-byte cache lines.
 */
#define cache_r4k_op_8lines_16(va, op) \
do { \
        __asm __volatile( \
                ".set noreorder \n\t" \
                "cache %1, 0x00(%0); cache %1, 0x10(%0) \n\t" \
                "cache %1, 0x20(%0); cache %1, 0x30(%0) \n\t" \
                "cache %1, 0x40(%0); cache %1, 0x50(%0) \n\t" \
                "cache %1, 0x60(%0); cache %1, 0x70(%0) \n\t" \
                ".set reorder" \
            : \
            : "r" (va), "i" (op) \
            : "memory"); \
} while (/*CONSTCOND*/0)

/*
 * cache_r4k_op_8lines_32:
 *
 *	Perform the specified cache operation on 8 32-byte cache lines.
 */
#define cache_r4k_op_8lines_32(va, op) \
do { \
        __asm __volatile( \
                ".set noreorder \n\t" \
                "cache %1, 0x00(%0); cache %1, 0x20(%0) \n\t" \
                "cache %1, 0x40(%0); cache %1, 0x60(%0) \n\t" \
                "cache %1, 0x80(%0); cache %1, 0xa0(%0) \n\t" \
                "cache %1, 0xc0(%0); cache %1, 0xe0(%0) \n\t" \
                ".set reorder" \
            : \
            : "r" (va), "i" (op) \
            : "memory"); \
} while (/*CONSTCOND*/0)

/*
 * cache_r4k_op_8lines_64:
 *
 *	Perform the specified cache operation on 8 64-byte cache lines.
 */
#define cache_r4k_op_8lines_64(va, op) \
do { \
        __asm __volatile( \
                ".set noreorder \n\t" \
                "cache %1, 0x000(%0); cache %1, 0x040(%0) \n\t" \
                "cache %1, 0x080(%0); cache %1, 0x0c0(%0) \n\t" \
                "cache %1, 0x100(%0); cache %1, 0x140(%0) \n\t" \
                "cache %1, 0x180(%0); cache %1, 0x1c0(%0) \n\t" \
                ".set reorder" \
            : \
            : "r" (va), "i" (op) \
            : "memory"); \
} while (/*CONSTCOND*/0)

/*
 * cache_r4k_op_32lines_16:
 *
 *	Perform the specified cache operation on 32 16-byte
 *	cache lines.
 */
#define cache_r4k_op_32lines_16(va, op) \
do { \
        __asm __volatile( \
                ".set noreorder \n\t" \
                "cache %1, 0x000(%0); cache %1, 0x010(%0); \n\t" \
                "cache %1, 0x020(%0); cache %1, 0x030(%0); \n\t" \
                "cache %1, 0x040(%0); cache %1, 0x050(%0); \n\t" \
                "cache %1, 0x060(%0); cache %1, 0x070(%0); \n\t" \
                "cache %1, 0x080(%0); cache %1, 0x090(%0); \n\t" \
                "cache %1, 0x0a0(%0); cache %1, 0x0b0(%0); \n\t" \
                "cache %1, 0x0c0(%0); cache %1, 0x0d0(%0); \n\t" \
                "cache %1, 0x0e0(%0); cache %1, 0x0f0(%0); \n\t" \
                "cache %1, 0x100(%0); cache %1, 0x110(%0); \n\t" \
                "cache %1, 0x120(%0); cache %1, 0x130(%0); \n\t" \
                "cache %1, 0x140(%0); cache %1, 0x150(%0); \n\t" \
                "cache %1, 0x160(%0); cache %1, 0x170(%0); \n\t" \
                "cache %1, 0x180(%0); cache %1, 0x190(%0); \n\t" \
                "cache %1, 0x1a0(%0); cache %1, 0x1b0(%0); \n\t" \
                "cache %1, 0x1c0(%0); cache %1, 0x1d0(%0); \n\t" \
                "cache %1, 0x1e0(%0); cache %1, 0x1f0(%0); \n\t" \
                ".set reorder" \
            : \
            : "r" (va), "i" (op) \
            : "memory"); \
} while (/*CONSTCOND*/0)

/*
 * cache_r4k_op_32lines_32:
 *
 *	Perform the specified cache operation on 32 32-byte
 *	cache lines.
 */
#define cache_r4k_op_32lines_32(va, op) \
do { \
        __asm __volatile( \
                ".set noreorder \n\t" \
                "cache %1, 0x000(%0); cache %1, 0x020(%0); \n\t" \
                "cache %1, 0x040(%0); cache %1, 0x060(%0); \n\t" \
                "cache %1, 0x080(%0); cache %1, 0x0a0(%0); \n\t" \
                "cache %1, 0x0c0(%0); cache %1, 0x0e0(%0); \n\t" \
                "cache %1, 0x100(%0); cache %1, 0x120(%0); \n\t" \
                "cache %1, 0x140(%0); cache %1, 0x160(%0); \n\t" \
                "cache %1, 0x180(%0); cache %1, 0x1a0(%0); \n\t" \
                "cache %1, 0x1c0(%0); cache %1, 0x1e0(%0); \n\t" \
                "cache %1, 0x200(%0); cache %1, 0x220(%0); \n\t" \
                "cache %1, 0x240(%0); cache %1, 0x260(%0); \n\t" \
                "cache %1, 0x280(%0); cache %1, 0x2a0(%0); \n\t" \
                "cache %1, 0x2c0(%0); cache %1, 0x2e0(%0); \n\t" \
                "cache %1, 0x300(%0); cache %1, 0x320(%0); \n\t" \
                "cache %1, 0x340(%0); cache %1, 0x360(%0); \n\t" \
                "cache %1, 0x380(%0); cache %1, 0x3a0(%0); \n\t" \
                "cache %1, 0x3c0(%0); cache %1, 0x3e0(%0); \n\t" \
                ".set reorder" \
            : \
            : "r" (va), "i" (op) \
            : "memory"); \
} while (/*CONSTCOND*/0)
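
/*
 * The multi-line macros above are meant to serve as the unrolled inner
 * loop of a range operation.  A minimal sketch of such a routine,
 * assuming a 32-byte primary data cache line (illustrative only; the
 * function name is hypothetical, and this is not necessarily how the
 * routines declared at the end of this file are implemented):
 *
 *	void
 *	example_pdcache_wbinv_range_32(vm_paddr_t va, vm_size_t size)
 *	{
 *		vm_paddr_t eva;
 *
 *		eva = (va + size + 31) & ~(vm_paddr_t)31;  // round end up
 *		va &= ~(vm_paddr_t)31;                     // round start down
 *
 *		// 32 lines (0x400 bytes) at a time
 *		while ((eva - va) >= (32 * 32)) {
 *			cache_r4k_op_32lines_32(va,
 *			    CACHE_R4K_D|CACHEOP_R4K_HIT_WB_INV);
 *			va += (32 * 32);
 *		}
 *
 *		// remaining lines one at a time
 *		while (va < eva) {
 *			cache_op_r4k_line(va,
 *			    CACHE_R4K_D|CACHEOP_R4K_HIT_WB_INV);
 *			va += 32;
 *		}
 *	}
 */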

/*
 * cache_r4k_op_32lines_64:
 *
 *	Perform the specified cache operation on 32 64-byte
 *	cache lines.
 */
#define cache_r4k_op_32lines_64(va, op) \
do { \
        __asm __volatile( \
                ".set noreorder \n\t" \
                "cache %1, 0x000(%0); cache %1, 0x040(%0); \n\t" \
                "cache %1, 0x080(%0); cache %1, 0x0c0(%0); \n\t" \
                "cache %1, 0x100(%0); cache %1, 0x140(%0); \n\t" \
                "cache %1, 0x180(%0); cache %1, 0x1c0(%0); \n\t" \
                "cache %1, 0x200(%0); cache %1, 0x240(%0); \n\t" \
                "cache %1, 0x280(%0); cache %1, 0x2c0(%0); \n\t" \
                "cache %1, 0x300(%0); cache %1, 0x340(%0); \n\t" \
                "cache %1, 0x380(%0); cache %1, 0x3c0(%0); \n\t" \
                "cache %1, 0x400(%0); cache %1, 0x440(%0); \n\t" \
                "cache %1, 0x480(%0); cache %1, 0x4c0(%0); \n\t" \
                "cache %1, 0x500(%0); cache %1, 0x540(%0); \n\t" \
                "cache %1, 0x580(%0); cache %1, 0x5c0(%0); \n\t" \
                "cache %1, 0x600(%0); cache %1, 0x640(%0); \n\t" \
                "cache %1, 0x680(%0); cache %1, 0x6c0(%0); \n\t" \
                "cache %1, 0x700(%0); cache %1, 0x740(%0); \n\t" \
                "cache %1, 0x780(%0); cache %1, 0x7c0(%0); \n\t" \
                ".set reorder" \
            : \
            : "r" (va), "i" (op) \
            : "memory"); \
} while (/*CONSTCOND*/0)

/*
 * cache_r4k_op_32lines_128:
 *
 *	Perform the specified cache operation on 32 128-byte
 *	cache lines.
 */
#define cache_r4k_op_32lines_128(va, op) \
do { \
        __asm __volatile( \
                ".set noreorder \n\t" \
                "cache %1, 0x0000(%0); cache %1, 0x0080(%0); \n\t" \
                "cache %1, 0x0100(%0); cache %1, 0x0180(%0); \n\t" \
                "cache %1, 0x0200(%0); cache %1, 0x0280(%0); \n\t" \
                "cache %1, 0x0300(%0); cache %1, 0x0380(%0); \n\t" \
                "cache %1, 0x0400(%0); cache %1, 0x0480(%0); \n\t" \
                "cache %1, 0x0500(%0); cache %1, 0x0580(%0); \n\t" \
                "cache %1, 0x0600(%0); cache %1, 0x0680(%0); \n\t" \
                "cache %1, 0x0700(%0); cache %1, 0x0780(%0); \n\t" \
                "cache %1, 0x0800(%0); cache %1, 0x0880(%0); \n\t" \
                "cache %1, 0x0900(%0); cache %1, 0x0980(%0); \n\t" \
                "cache %1, 0x0a00(%0); cache %1, 0x0a80(%0); \n\t" \
                "cache %1, 0x0b00(%0); cache %1, 0x0b80(%0); \n\t" \
                "cache %1, 0x0c00(%0); cache %1, 0x0c80(%0); \n\t" \
                "cache %1, 0x0d00(%0); cache %1, 0x0d80(%0); \n\t" \
                "cache %1, 0x0e00(%0); cache %1, 0x0e80(%0); \n\t" \
                "cache %1, 0x0f00(%0); cache %1, 0x0f80(%0); \n\t" \
                ".set reorder" \
            : \
            : "r" (va), "i" (op) \
            : "memory"); \
} while (/*CONSTCOND*/0)

/*
 * cache_r4k_op_16lines_16_2way:
 *
 *	Perform the specified cache operation on 16 16-byte
 *	cache lines, 2-ways.
 */
#define cache_r4k_op_16lines_16_2way(va1, va2, op) \
do { \
        __asm __volatile( \
                ".set noreorder \n\t" \
                "cache %2, 0x000(%0); cache %2, 0x000(%1); \n\t" \
                "cache %2, 0x010(%0); cache %2, 0x010(%1); \n\t" \
                "cache %2, 0x020(%0); cache %2, 0x020(%1); \n\t" \
                "cache %2, 0x030(%0); cache %2, 0x030(%1); \n\t" \
                "cache %2, 0x040(%0); cache %2, 0x040(%1); \n\t" \
                "cache %2, 0x050(%0); cache %2, 0x050(%1); \n\t" \
                "cache %2, 0x060(%0); cache %2, 0x060(%1); \n\t" \
                "cache %2, 0x070(%0); cache %2, 0x070(%1); \n\t" \
                "cache %2, 0x080(%0); cache %2, 0x080(%1); \n\t" \
                "cache %2, 0x090(%0); cache %2, 0x090(%1); \n\t" \
                "cache %2, 0x0a0(%0); cache %2, 0x0a0(%1); \n\t" \
                "cache %2, 0x0b0(%0); cache %2, 0x0b0(%1); \n\t" \
                "cache %2, 0x0c0(%0); cache %2, 0x0c0(%1); \n\t" \
                "cache %2, 0x0d0(%0); cache %2, 0x0d0(%1); \n\t" \
                "cache %2, 0x0e0(%0); cache %2, 0x0e0(%1); \n\t" \
                "cache %2, 0x0f0(%0); cache %2, 0x0f0(%1); \n\t" \
                ".set reorder" \
            : \
            : "r" (va1), "r" (va2), "i" (op) \
            : "memory"); \
} while (/*CONSTCOND*/0)

/*
 * cache_r4k_op_16lines_32_2way:
 *
 *	Perform the specified cache operation on 16 32-byte
 *	cache lines, 2-ways.
 */
#define cache_r4k_op_16lines_32_2way(va1, va2, op) \
do { \
        __asm __volatile( \
                ".set noreorder \n\t" \
                "cache %2, 0x000(%0); cache %2, 0x000(%1); \n\t" \
                "cache %2, 0x020(%0); cache %2, 0x020(%1); \n\t" \
                "cache %2, 0x040(%0); cache %2, 0x040(%1); \n\t" \
                "cache %2, 0x060(%0); cache %2, 0x060(%1); \n\t" \
                "cache %2, 0x080(%0); cache %2, 0x080(%1); \n\t" \
                "cache %2, 0x0a0(%0); cache %2, 0x0a0(%1); \n\t" \
                "cache %2, 0x0c0(%0); cache %2, 0x0c0(%1); \n\t" \
                "cache %2, 0x0e0(%0); cache %2, 0x0e0(%1); \n\t" \
                "cache %2, 0x100(%0); cache %2, 0x100(%1); \n\t" \
                "cache %2, 0x120(%0); cache %2, 0x120(%1); \n\t" \
                "cache %2, 0x140(%0); cache %2, 0x140(%1); \n\t" \
                "cache %2, 0x160(%0); cache %2, 0x160(%1); \n\t" \
                "cache %2, 0x180(%0); cache %2, 0x180(%1); \n\t" \
                "cache %2, 0x1a0(%0); cache %2, 0x1a0(%1); \n\t" \
                "cache %2, 0x1c0(%0); cache %2, 0x1c0(%1); \n\t" \
                "cache %2, 0x1e0(%0); cache %2, 0x1e0(%1); \n\t" \
                ".set reorder" \
            : \
            : "r" (va1), "r" (va2), "i" (op) \
            : "memory"); \
} while (/*CONSTCOND*/0)

/*
 * cache_r4k_op_8lines_16_4way:
 *
 *	Perform the specified cache operation on 8 16-byte
 *	cache lines, 4-ways.
 */
#define cache_r4k_op_8lines_16_4way(va1, va2, va3, va4, op) \
do { \
        __asm __volatile( \
                ".set noreorder \n\t" \
                "cache %4, 0x000(%0); cache %4, 0x000(%1); \n\t" \
                "cache %4, 0x000(%2); cache %4, 0x000(%3); \n\t" \
                "cache %4, 0x010(%0); cache %4, 0x010(%1); \n\t" \
                "cache %4, 0x010(%2); cache %4, 0x010(%3); \n\t" \
                "cache %4, 0x020(%0); cache %4, 0x020(%1); \n\t" \
                "cache %4, 0x020(%2); cache %4, 0x020(%3); \n\t" \
                "cache %4, 0x030(%0); cache %4, 0x030(%1); \n\t" \
                "cache %4, 0x030(%2); cache %4, 0x030(%3); \n\t" \
                "cache %4, 0x040(%0); cache %4, 0x040(%1); \n\t" \
                "cache %4, 0x040(%2); cache %4, 0x040(%3); \n\t" \
                "cache %4, 0x050(%0); cache %4, 0x050(%1); \n\t" \
                "cache %4, 0x050(%2); cache %4, 0x050(%3); \n\t" \
                "cache %4, 0x060(%0); cache %4, 0x060(%1); \n\t" \
                "cache %4, 0x060(%2); cache %4, 0x060(%3); \n\t" \
                "cache %4, 0x070(%0); cache %4, 0x070(%1); \n\t" \
                "cache %4, 0x070(%2); cache %4, 0x070(%3); \n\t" \
                ".set reorder" \
            : \
            : "r" (va1), "r" (va2), "r" (va3), "r" (va4), "i" (op) \
            : "memory"); \
} while (/*CONSTCOND*/0)

/*
 * cache_r4k_op_8lines_32_4way:
 *
 *	Perform the specified cache operation on 8 32-byte
 *	cache lines, 4-ways.
 */
#define cache_r4k_op_8lines_32_4way(va1, va2, va3, va4, op) \
do { \
        __asm __volatile( \
                ".set noreorder \n\t" \
                "cache %4, 0x000(%0); cache %4, 0x000(%1); \n\t" \
                "cache %4, 0x000(%2); cache %4, 0x000(%3); \n\t" \
                "cache %4, 0x020(%0); cache %4, 0x020(%1); \n\t" \
                "cache %4, 0x020(%2); cache %4, 0x020(%3); \n\t" \
                "cache %4, 0x040(%0); cache %4, 0x040(%1); \n\t" \
                "cache %4, 0x040(%2); cache %4, 0x040(%3); \n\t" \
                "cache %4, 0x060(%0); cache %4, 0x060(%1); \n\t" \
                "cache %4, 0x060(%2); cache %4, 0x060(%3); \n\t" \
                "cache %4, 0x080(%0); cache %4, 0x080(%1); \n\t" \
                "cache %4, 0x080(%2); cache %4, 0x080(%3); \n\t" \
                "cache %4, 0x0a0(%0); cache %4, 0x0a0(%1); \n\t" \
                "cache %4, 0x0a0(%2); cache %4, 0x0a0(%3); \n\t" \
                "cache %4, 0x0c0(%0); cache %4, 0x0c0(%1); \n\t" \
                "cache %4, 0x0c0(%2); cache %4, 0x0c0(%3); \n\t" \
                "cache %4, 0x0e0(%0); cache %4, 0x0e0(%1); \n\t" \
                "cache %4, 0x0e0(%2); cache %4, 0x0e0(%3); \n\t" \
                ".set reorder" \
            : \
            : "r" (va1), "r" (va2), "r" (va3), "r" (va4), "i" (op) \
            : "memory"); \
} while (/*CONSTCOND*/0)
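
/*
 * The 2-way and 4-way variants apply the operation at the same offsets
 * in several ways of a set-associative cache, which is what the
 * index-type operations (CACHEOP_R4K_INDEX_*) need, since those address
 * the cache by index and way rather than by matching address.  A sketch
 * for a 2-way primary data cache, where the second address is the first
 * plus the per-way stride (total cache size divided by associativity;
 * "way_size" is an illustrative name):
 *
 *	cache_r4k_op_16lines_32_2way(va, va + way_size,
 *	    CACHE_R4K_D|CACHEOP_R4K_INDEX_WB_INV);
 */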

void r4k_icache_sync_all_16(void);
void r4k_icache_sync_range_16(vm_paddr_t, vm_size_t);
void r4k_icache_sync_range_index_16(vm_paddr_t, vm_size_t);

void r4k_icache_sync_all_32(void);
void r4k_icache_sync_range_32(vm_paddr_t, vm_size_t);
void r4k_icache_sync_range_index_32(vm_paddr_t, vm_size_t);

void r4k_pdcache_wbinv_all_16(void);
void r4k_pdcache_wbinv_range_16(vm_paddr_t, vm_size_t);
void r4k_pdcache_wbinv_range_index_16(vm_paddr_t, vm_size_t);

void r4k_pdcache_inv_range_16(vm_paddr_t, vm_size_t);
void r4k_pdcache_wb_range_16(vm_paddr_t, vm_size_t);

void r4k_pdcache_wbinv_all_32(void);
void r4k_pdcache_wbinv_range_32(vm_paddr_t, vm_size_t);
void r4k_pdcache_wbinv_range_index_32(vm_paddr_t, vm_size_t);

void r4k_pdcache_inv_range_32(vm_paddr_t, vm_size_t);
void r4k_pdcache_wb_range_32(vm_paddr_t, vm_size_t);

void r4k_sdcache_wbinv_all_32(void);
void r4k_sdcache_wbinv_range_32(vm_paddr_t, vm_size_t);
void r4k_sdcache_wbinv_range_index_32(vm_paddr_t, vm_size_t);

void r4k_sdcache_inv_range_32(vm_paddr_t, vm_size_t);
void r4k_sdcache_wb_range_32(vm_paddr_t, vm_size_t);

void r4k_sdcache_wbinv_all_128(void);
void r4k_sdcache_wbinv_range_128(vm_paddr_t, vm_size_t);
void r4k_sdcache_wbinv_range_index_128(vm_paddr_t, vm_size_t);

void r4k_sdcache_inv_range_128(vm_paddr_t, vm_size_t);
void r4k_sdcache_wb_range_128(vm_paddr_t, vm_size_t);

void r4k_sdcache_wbinv_all_generic(void);
void r4k_sdcache_wbinv_range_generic(vm_paddr_t, vm_size_t);
void r4k_sdcache_wbinv_range_index_generic(vm_paddr_t, vm_size_t);

void r4k_sdcache_inv_range_generic(vm_paddr_t, vm_size_t);
void r4k_sdcache_wb_range_generic(vm_paddr_t, vm_size_t);
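
/*
 * The _16, _32, and _128 flavors above differ only in the cache line
 * size they assume.  Machine-dependent initialization code is expected
 * to probe the line sizes and install the matching flavor in its
 * cache-operation dispatch table.  Assuming a mips_cache_ops-style
 * table as declared in cache.h, that selection might look like
 * (sketch only):
 *
 *	switch (picache_line_size) {
 *	case 16:
 *		mips_cache_ops.mco_icache_sync_all = r4k_icache_sync_all_16;
 *		break;
 *	case 32:
 *		mips_cache_ops.mco_icache_sync_all = r4k_icache_sync_all_32;
 *		break;
 *	}
 *
 * where "picache_line_size" is an illustrative variable name.
 */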

#endif /* !LOCORE */