FreeBSD/Linux Kernel Cross Reference
sys/arm/arm/cpufunc.c
1 /* $NetBSD: cpufunc.c,v 1.65 2003/11/05 12:53:15 scw Exp $ */
2
3 /*-
4 * arm7tdmi support code Copyright (c) 2001 John Fremlin
5 * arm8 support code Copyright (c) 1997 ARM Limited
6 * arm8 support code Copyright (c) 1997 Causality Limited
7 * arm9 support code Copyright (C) 2001 ARM Ltd
8 * Copyright (c) 1997 Mark Brinicombe.
9 * Copyright (c) 1997 Causality Limited
10 * All rights reserved.
11 *
12 * Redistribution and use in source and binary forms, with or without
13 * modification, are permitted provided that the following conditions
14 * are met:
15 * 1. Redistributions of source code must retain the above copyright
16 * notice, this list of conditions and the following disclaimer.
17 * 2. Redistributions in binary form must reproduce the above copyright
18 * notice, this list of conditions and the following disclaimer in the
19 * documentation and/or other materials provided with the distribution.
20 * 3. All advertising materials mentioning features or use of this software
21 * must display the following acknowledgement:
22 * This product includes software developed by Causality Limited.
23 * 4. The name of Causality Limited may not be used to endorse or promote
24 * products derived from this software without specific prior written
25 * permission.
26 *
27 * THIS SOFTWARE IS PROVIDED BY CAUSALITY LIMITED ``AS IS'' AND ANY EXPRESS
28 * OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
29 * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
30 * DISCLAIMED. IN NO EVENT SHALL CAUSALITY LIMITED BE LIABLE FOR ANY DIRECT,
31 * INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
32 * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
33 * SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
34 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
35 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
36 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
37 * SUCH DAMAGE.
38 *
39 * RiscBSD kernel project
40 *
41 * cpufuncs.c
42 *
43 * C functions for supporting CPU / MMU / TLB specific operations.
44 *
45 * Created : 30/01/97
46 */
47 #include <sys/cdefs.h>
48 __FBSDID("$FreeBSD$");
49
50 #include <sys/param.h>
51 #include <sys/systm.h>
52 #include <sys/lock.h>
53 #include <sys/mutex.h>
54 #include <sys/bus.h>
55 #include <machine/bus.h>
56 #include <machine/cpu.h>
57 #include <machine/disassem.h>
58
59 #include <vm/vm.h>
60 #include <vm/pmap.h>
61 #include <vm/uma.h>
62
63 #include <machine/cpuconf.h>
64 #include <machine/cpufunc.h>
65 #include <machine/bootconfig.h>
66
67 #ifdef CPU_XSCALE_80200
68 #include <arm/xscale/i80200/i80200reg.h>
69 #include <arm/xscale/i80200/i80200var.h>
70 #endif
71
72 #if defined(CPU_XSCALE_80321) || defined(CPU_XSCALE_80219)
73 #include <arm/xscale/i80321/i80321reg.h>
74 #include <arm/xscale/i80321/i80321var.h>
75 #endif
76
77 #if defined(CPU_XSCALE_81342)
78 #include <arm/xscale/i8134x/i81342reg.h>
79 #endif
80
81 #ifdef CPU_XSCALE_IXP425
82 #include <arm/xscale/ixp425/ixp425reg.h>
83 #include <arm/xscale/ixp425/ixp425var.h>
84 #endif
85
86 #if defined(CPU_XSCALE_80200) || defined(CPU_XSCALE_80321) || \
87 defined(CPU_XSCALE_80219) || defined(CPU_XSCALE_81342)
88 #include <arm/xscale/xscalereg.h>
89 #endif
90
#if defined(PERFCTRS)
struct arm_pmc_funcs *arm_pmc;	/* performance-monitor hooks (PERFCTRS builds only) */
#endif

/*
 * PRIMARY CACHE VARIABLES
 *
 * Filled in at boot by get_cachetype_cp15() or get_cachetype_table(),
 * depending on whether the CPU has a usable cache type register.
 */
int	arm_picache_size;		/* I-cache size */
int	arm_picache_line_size;		/* I-cache line size */
int	arm_picache_ways;		/* I-cache associativity */

int	arm_pdcache_size;	/* and unified */
int	arm_pdcache_line_size;		/* D-cache (or unified) line size */
int	arm_pdcache_ways;		/* D-cache (or unified) associativity */

int	arm_pcache_type;		/* primary cache type (CPU_CT_CTYPE_*) */
int	arm_pcache_unified;		/* non-zero if I and D caches are unified */

int	arm_dcache_align;		/* D-cache line size, for alignment */
int	arm_dcache_align_mask;		/* arm_dcache_align - 1 */

/* 1 == use cpu_sleep(), 0 == don't */
int cpu_do_powersave;
/* NOTE(review): assigned outside this view (CPU control register value?) — confirm */
int ctrl;
113
114 #ifdef CPU_ARM7TDMI
/*
 * Dispatch table for ARM7TDMI cores.  The split I/D TLB and cache slots
 * all reuse the combined (ID) routines, and every L2 slot is a no-op.
 */
struct cpu_functions arm7tdmi_cpufuncs = {
	/* CPU functions */

	cpufunc_id,			/* id */
	cpufunc_nullop,			/* cpwait */

	/* MMU functions */

	cpufunc_control,		/* control */
	cpufunc_domains,		/* domain */
	arm7tdmi_setttb,		/* setttb */
	cpufunc_faultstatus,		/* faultstatus */
	cpufunc_faultaddress,		/* faultaddress */

	/* TLB functions */

	arm7tdmi_tlb_flushID,		/* tlb_flushID */
	arm7tdmi_tlb_flushID_SE,	/* tlb_flushID_SE */
	arm7tdmi_tlb_flushID,		/* tlb_flushI */
	arm7tdmi_tlb_flushID_SE,	/* tlb_flushI_SE */
	arm7tdmi_tlb_flushID,		/* tlb_flushD */
	arm7tdmi_tlb_flushID_SE,	/* tlb_flushD_SE */

	/* Cache operations */

	cpufunc_nullop,			/* icache_sync_all */
	(void *)cpufunc_nullop,		/* icache_sync_range */

	arm7tdmi_cache_flushID,		/* dcache_wbinv_all */
	(void *)arm7tdmi_cache_flushID,	/* dcache_wbinv_range */
	(void *)arm7tdmi_cache_flushID,	/* dcache_inv_range */
	(void *)cpufunc_nullop,		/* dcache_wb_range */

	arm7tdmi_cache_flushID,		/* idcache_wbinv_all */
	(void *)arm7tdmi_cache_flushID,	/* idcache_wbinv_range */
	cpufunc_nullop,			/* l2cache_wbinv_all */
	(void *)cpufunc_nullop,		/* l2cache_wbinv_range */
	(void *)cpufunc_nullop,		/* l2cache_inv_range */
	(void *)cpufunc_nullop,		/* l2cache_wb_range */

	/* Other functions */

	cpufunc_nullop,			/* flush_prefetchbuf */
	cpufunc_nullop,			/* drain_writebuf */
	cpufunc_nullop,			/* flush_brnchtgt_C */
	(void *)cpufunc_nullop,		/* flush_brnchtgt_E */

	(void *)cpufunc_nullop,		/* sleep */

	/* Soft functions */

	late_abort_fixup,		/* dataabt_fixup */
	cpufunc_null_fixup,		/* prefetchabt_fixup */

	arm7tdmi_context_switch,	/* context_switch */

	arm7tdmi_setup			/* cpu setup */

};
174 #endif /* CPU_ARM7TDMI */
175
176 #ifdef CPU_ARM8
/*
 * Dispatch table for ARM8 cores.  dcache_inv_range is approximated with a
 * full write-back-and-invalidate (see XXX below); L2 slots are no-ops.
 */
struct cpu_functions arm8_cpufuncs = {
	/* CPU functions */

	cpufunc_id,			/* id */
	cpufunc_nullop,			/* cpwait */

	/* MMU functions */

	cpufunc_control,		/* control */
	cpufunc_domains,		/* domain */
	arm8_setttb,			/* setttb */
	cpufunc_faultstatus,		/* faultstatus */
	cpufunc_faultaddress,		/* faultaddress */

	/* TLB functions */

	arm8_tlb_flushID,		/* tlb_flushID */
	arm8_tlb_flushID_SE,		/* tlb_flushID_SE */
	arm8_tlb_flushID,		/* tlb_flushI */
	arm8_tlb_flushID_SE,		/* tlb_flushI_SE */
	arm8_tlb_flushID,		/* tlb_flushD */
	arm8_tlb_flushID_SE,		/* tlb_flushD_SE */

	/* Cache operations */

	cpufunc_nullop,			/* icache_sync_all */
	(void *)cpufunc_nullop,		/* icache_sync_range */

	arm8_cache_purgeID,		/* dcache_wbinv_all */
	(void *)arm8_cache_purgeID,	/* dcache_wbinv_range */
/*XXX*/	(void *)arm8_cache_purgeID,	/* dcache_inv_range */
	(void *)arm8_cache_cleanID,	/* dcache_wb_range */

	arm8_cache_purgeID,		/* idcache_wbinv_all */
	(void *)arm8_cache_purgeID,	/* idcache_wbinv_range */
	cpufunc_nullop,			/* l2cache_wbinv_all */
	(void *)cpufunc_nullop,		/* l2cache_wbinv_range */
	(void *)cpufunc_nullop,		/* l2cache_inv_range */
	(void *)cpufunc_nullop,		/* l2cache_wb_range */

	/* Other functions */

	cpufunc_nullop,			/* flush_prefetchbuf */
	cpufunc_nullop,			/* drain_writebuf */
	cpufunc_nullop,			/* flush_brnchtgt_C */
	(void *)cpufunc_nullop,		/* flush_brnchtgt_E */

	(void *)cpufunc_nullop,		/* sleep */

	/* Soft functions */

	cpufunc_null_fixup,		/* dataabt_fixup */
	cpufunc_null_fixup,		/* prefetchabt_fixup */

	arm8_context_switch,		/* context_switch */

	arm8_setup			/* cpu setup */
};
235 #endif /* CPU_ARM8 */
236
237 #ifdef CPU_ARM9
/*
 * Dispatch table for ARM9 cores: ARMv4 common TLB ops plus ARM9-specific
 * cache routines.  dcache_inv_range is approximated by wbinv (see XXX).
 */
struct cpu_functions arm9_cpufuncs = {
	/* CPU functions */

	cpufunc_id,			/* id */
	cpufunc_nullop,			/* cpwait */

	/* MMU functions */

	cpufunc_control,		/* control */
	cpufunc_domains,		/* Domain */
	arm9_setttb,			/* Setttb */
	cpufunc_faultstatus,		/* Faultstatus */
	cpufunc_faultaddress,		/* Faultaddress */

	/* TLB functions */

	armv4_tlb_flushID,		/* tlb_flushID */
	arm9_tlb_flushID_SE,		/* tlb_flushID_SE */
	armv4_tlb_flushI,		/* tlb_flushI */
	(void *)armv4_tlb_flushI,	/* tlb_flushI_SE */
	armv4_tlb_flushD,		/* tlb_flushD */
	armv4_tlb_flushD_SE,		/* tlb_flushD_SE */

	/* Cache operations */

	arm9_icache_sync_all,		/* icache_sync_all */
	arm9_icache_sync_range,		/* icache_sync_range */

	arm9_dcache_wbinv_all,		/* dcache_wbinv_all */
	arm9_dcache_wbinv_range,	/* dcache_wbinv_range */
/*XXX*/	arm9_dcache_wbinv_range,	/* dcache_inv_range */
	arm9_dcache_wb_range,		/* dcache_wb_range */

	arm9_idcache_wbinv_all,		/* idcache_wbinv_all */
	arm9_idcache_wbinv_range,	/* idcache_wbinv_range */
	cpufunc_nullop,			/* l2cache_wbinv_all */
	(void *)cpufunc_nullop,		/* l2cache_wbinv_range */
	(void *)cpufunc_nullop,		/* l2cache_inv_range */
	(void *)cpufunc_nullop,		/* l2cache_wb_range */

	/* Other functions */

	cpufunc_nullop,			/* flush_prefetchbuf */
	armv4_drain_writebuf,		/* drain_writebuf */
	cpufunc_nullop,			/* flush_brnchtgt_C */
	(void *)cpufunc_nullop,		/* flush_brnchtgt_E */

	(void *)cpufunc_nullop,		/* sleep */

	/* Soft functions */

	cpufunc_null_fixup,		/* dataabt_fixup */
	cpufunc_null_fixup,		/* prefetchabt_fixup */

	arm9_context_switch,		/* context_switch */

	arm9_setup			/* cpu setup */

};
297 #endif /* CPU_ARM9 */
298
299 #if defined(CPU_ARM9E) || defined(CPU_ARM10)
/*
 * Dispatch table for ARMv5 "EC" cores (ARM926EJ-S / ARM1026EJ-S); uses the
 * armv5_ec cache routines with ARM10 TLB-by-entry ops and context switch.
 */
struct cpu_functions armv5_ec_cpufuncs = {
	/* CPU functions */

	cpufunc_id,			/* id */
	cpufunc_nullop,			/* cpwait */

	/* MMU functions */

	cpufunc_control,		/* control */
	cpufunc_domains,		/* Domain */
	armv5_ec_setttb,		/* Setttb */
	cpufunc_faultstatus,		/* Faultstatus */
	cpufunc_faultaddress,		/* Faultaddress */

	/* TLB functions */

	armv4_tlb_flushID,		/* tlb_flushID */
	arm10_tlb_flushID_SE,		/* tlb_flushID_SE */
	armv4_tlb_flushI,		/* tlb_flushI */
	arm10_tlb_flushI_SE,		/* tlb_flushI_SE */
	armv4_tlb_flushD,		/* tlb_flushD */
	armv4_tlb_flushD_SE,		/* tlb_flushD_SE */

	/* Cache operations */

	armv5_ec_icache_sync_all,	/* icache_sync_all */
	armv5_ec_icache_sync_range,	/* icache_sync_range */

	armv5_ec_dcache_wbinv_all,	/* dcache_wbinv_all */
	armv5_ec_dcache_wbinv_range,	/* dcache_wbinv_range */
/*XXX*/	armv5_ec_dcache_wbinv_range,	/* dcache_inv_range */
	armv5_ec_dcache_wb_range,	/* dcache_wb_range */

	armv5_ec_idcache_wbinv_all,	/* idcache_wbinv_all */
	armv5_ec_idcache_wbinv_range,	/* idcache_wbinv_range */

	cpufunc_nullop,			/* l2cache_wbinv_all */
	(void *)cpufunc_nullop,		/* l2cache_wbinv_range */
	(void *)cpufunc_nullop,		/* l2cache_inv_range */
	(void *)cpufunc_nullop,		/* l2cache_wb_range */

	/* Other functions */

	cpufunc_nullop,			/* flush_prefetchbuf */
	armv4_drain_writebuf,		/* drain_writebuf */
	cpufunc_nullop,			/* flush_brnchtgt_C */
	(void *)cpufunc_nullop,		/* flush_brnchtgt_E */

	(void *)cpufunc_nullop,		/* sleep */

	/* Soft functions */

	cpufunc_null_fixup,		/* dataabt_fixup */
	cpufunc_null_fixup,		/* prefetchabt_fixup */

	arm10_context_switch,		/* context_switch */

	arm10_setup			/* cpu setup */

};
360 #endif /* CPU_ARM9E || CPU_ARM10 */
361
362 #ifdef CPU_ARM10
/*
 * Dispatch table for ARM10 cores.  Unlike the ARM9/armv5_ec tables this one
 * has a real dcache_inv_range implementation.
 */
struct cpu_functions arm10_cpufuncs = {
	/* CPU functions */

	cpufunc_id,			/* id */
	cpufunc_nullop,			/* cpwait */

	/* MMU functions */

	cpufunc_control,		/* control */
	cpufunc_domains,		/* Domain */
	arm10_setttb,			/* Setttb */
	cpufunc_faultstatus,		/* Faultstatus */
	cpufunc_faultaddress,		/* Faultaddress */

	/* TLB functions */

	armv4_tlb_flushID,		/* tlb_flushID */
	arm10_tlb_flushID_SE,		/* tlb_flushID_SE */
	armv4_tlb_flushI,		/* tlb_flushI */
	arm10_tlb_flushI_SE,		/* tlb_flushI_SE */
	armv4_tlb_flushD,		/* tlb_flushD */
	armv4_tlb_flushD_SE,		/* tlb_flushD_SE */

	/* Cache operations */

	arm10_icache_sync_all,		/* icache_sync_all */
	arm10_icache_sync_range,	/* icache_sync_range */

	arm10_dcache_wbinv_all,		/* dcache_wbinv_all */
	arm10_dcache_wbinv_range,	/* dcache_wbinv_range */
	arm10_dcache_inv_range,		/* dcache_inv_range */
	arm10_dcache_wb_range,		/* dcache_wb_range */

	arm10_idcache_wbinv_all,	/* idcache_wbinv_all */
	arm10_idcache_wbinv_range,	/* idcache_wbinv_range */
	cpufunc_nullop,			/* l2cache_wbinv_all */
	(void *)cpufunc_nullop,		/* l2cache_wbinv_range */
	(void *)cpufunc_nullop,		/* l2cache_inv_range */
	(void *)cpufunc_nullop,		/* l2cache_wb_range */

	/* Other functions */

	cpufunc_nullop,			/* flush_prefetchbuf */
	armv4_drain_writebuf,		/* drain_writebuf */
	cpufunc_nullop,			/* flush_brnchtgt_C */
	(void *)cpufunc_nullop,		/* flush_brnchtgt_E */

	(void *)cpufunc_nullop,		/* sleep */

	/* Soft functions */

	cpufunc_null_fixup,		/* dataabt_fixup */
	cpufunc_null_fixup,		/* prefetchabt_fixup */

	arm10_context_switch,		/* context_switch */

	arm10_setup			/* cpu setup */

};
422 #endif /* CPU_ARM10 */
423
424 #ifdef CPU_SA110
/*
 * Dispatch table for StrongARM SA-110: ARMv4 TLB ops plus the shared SA-1
 * cache routines.  dcache_inv_range is approximated by a purge (see XXX).
 */
struct cpu_functions sa110_cpufuncs = {
	/* CPU functions */

	cpufunc_id,			/* id */
	cpufunc_nullop,			/* cpwait */

	/* MMU functions */

	cpufunc_control,		/* control */
	cpufunc_domains,		/* domain */
	sa1_setttb,			/* setttb */
	cpufunc_faultstatus,		/* faultstatus */
	cpufunc_faultaddress,		/* faultaddress */

	/* TLB functions */

	armv4_tlb_flushID,		/* tlb_flushID */
	sa1_tlb_flushID_SE,		/* tlb_flushID_SE */
	armv4_tlb_flushI,		/* tlb_flushI */
	(void *)armv4_tlb_flushI,	/* tlb_flushI_SE */
	armv4_tlb_flushD,		/* tlb_flushD */
	armv4_tlb_flushD_SE,		/* tlb_flushD_SE */

	/* Cache operations */

	sa1_cache_syncI,		/* icache_sync_all */
	sa1_cache_syncI_rng,		/* icache_sync_range */

	sa1_cache_purgeD,		/* dcache_wbinv_all */
	sa1_cache_purgeD_rng,		/* dcache_wbinv_range */
/*XXX*/	sa1_cache_purgeD_rng,		/* dcache_inv_range */
	sa1_cache_cleanD_rng,		/* dcache_wb_range */

	sa1_cache_purgeID,		/* idcache_wbinv_all */
	sa1_cache_purgeID_rng,		/* idcache_wbinv_range */
	cpufunc_nullop,			/* l2cache_wbinv_all */
	(void *)cpufunc_nullop,		/* l2cache_wbinv_range */
	(void *)cpufunc_nullop,		/* l2cache_inv_range */
	(void *)cpufunc_nullop,		/* l2cache_wb_range */

	/* Other functions */

	cpufunc_nullop,			/* flush_prefetchbuf */
	armv4_drain_writebuf,		/* drain_writebuf */
	cpufunc_nullop,			/* flush_brnchtgt_C */
	(void *)cpufunc_nullop,		/* flush_brnchtgt_E */

	(void *)cpufunc_nullop,		/* sleep */

	/* Soft functions */

	cpufunc_null_fixup,		/* dataabt_fixup */
	cpufunc_null_fixup,		/* prefetchabt_fixup */

	sa110_context_switch,		/* context_switch */

	sa110_setup			/* cpu setup */
};
483 #endif /* CPU_SA110 */
484
485 #if defined(CPU_SA1100) || defined(CPU_SA1110)
/*
 * Dispatch table for StrongARM SA-1100/SA-1110.  Same SA-1 cache/TLB ops as
 * SA-110, but with a read-buffer drain for flush_prefetchbuf and a real
 * sleep hook (sa11x0_cpu_sleep).
 */
struct cpu_functions sa11x0_cpufuncs = {
	/* CPU functions */

	cpufunc_id,			/* id */
	cpufunc_nullop,			/* cpwait */

	/* MMU functions */

	cpufunc_control,		/* control */
	cpufunc_domains,		/* domain */
	sa1_setttb,			/* setttb */
	cpufunc_faultstatus,		/* faultstatus */
	cpufunc_faultaddress,		/* faultaddress */

	/* TLB functions */

	armv4_tlb_flushID,		/* tlb_flushID */
	sa1_tlb_flushID_SE,		/* tlb_flushID_SE */
	armv4_tlb_flushI,		/* tlb_flushI */
	(void *)armv4_tlb_flushI,	/* tlb_flushI_SE */
	armv4_tlb_flushD,		/* tlb_flushD */
	armv4_tlb_flushD_SE,		/* tlb_flushD_SE */

	/* Cache operations */

	sa1_cache_syncI,		/* icache_sync_all */
	sa1_cache_syncI_rng,		/* icache_sync_range */

	sa1_cache_purgeD,		/* dcache_wbinv_all */
	sa1_cache_purgeD_rng,		/* dcache_wbinv_range */
/*XXX*/	sa1_cache_purgeD_rng,		/* dcache_inv_range */
	sa1_cache_cleanD_rng,		/* dcache_wb_range */

	sa1_cache_purgeID,		/* idcache_wbinv_all */
	sa1_cache_purgeID_rng,		/* idcache_wbinv_range */
	cpufunc_nullop,			/* l2cache_wbinv_all */
	(void *)cpufunc_nullop,		/* l2cache_wbinv_range */
	(void *)cpufunc_nullop,		/* l2cache_inv_range */
	(void *)cpufunc_nullop,		/* l2cache_wb_range */

	/* Other functions */

	sa11x0_drain_readbuf,		/* flush_prefetchbuf */
	armv4_drain_writebuf,		/* drain_writebuf */
	cpufunc_nullop,			/* flush_brnchtgt_C */
	(void *)cpufunc_nullop,		/* flush_brnchtgt_E */

	sa11x0_cpu_sleep,		/* sleep */

	/* Soft functions */

	cpufunc_null_fixup,		/* dataabt_fixup */
	cpufunc_null_fixup,		/* prefetchabt_fixup */

	sa11x0_context_switch,		/* context_switch */

	sa11x0_setup			/* cpu setup */
};
544 #endif /* CPU_SA1100 || CPU_SA1110 */
545
546 #ifdef CPU_IXP12X0
/*
 * Dispatch table for the IXP12x0 network processor; reuses the SA-1
 * cache/TLB routines with an IXP12x0-specific read-buffer drain.
 */
struct cpu_functions ixp12x0_cpufuncs = {
	/* CPU functions */

	cpufunc_id,			/* id */
	cpufunc_nullop,			/* cpwait */

	/* MMU functions */

	cpufunc_control,		/* control */
	cpufunc_domains,		/* domain */
	sa1_setttb,			/* setttb */
	cpufunc_faultstatus,		/* faultstatus */
	cpufunc_faultaddress,		/* faultaddress */

	/* TLB functions */

	armv4_tlb_flushID,		/* tlb_flushID */
	sa1_tlb_flushID_SE,		/* tlb_flushID_SE */
	armv4_tlb_flushI,		/* tlb_flushI */
	(void *)armv4_tlb_flushI,	/* tlb_flushI_SE */
	armv4_tlb_flushD,		/* tlb_flushD */
	armv4_tlb_flushD_SE,		/* tlb_flushD_SE */

	/* Cache operations */

	sa1_cache_syncI,		/* icache_sync_all */
	sa1_cache_syncI_rng,		/* icache_sync_range */

	sa1_cache_purgeD,		/* dcache_wbinv_all */
	sa1_cache_purgeD_rng,		/* dcache_wbinv_range */
/*XXX*/	sa1_cache_purgeD_rng,		/* dcache_inv_range */
	sa1_cache_cleanD_rng,		/* dcache_wb_range */

	sa1_cache_purgeID,		/* idcache_wbinv_all */
	sa1_cache_purgeID_rng,		/* idcache_wbinv_range */
	cpufunc_nullop,			/* l2cache_wbinv_all */
	(void *)cpufunc_nullop,		/* l2cache_wbinv_range */
	(void *)cpufunc_nullop,		/* l2cache_inv_range */
	(void *)cpufunc_nullop,		/* l2cache_wb_range */

	/* Other functions */

	ixp12x0_drain_readbuf,		/* flush_prefetchbuf */
	armv4_drain_writebuf,		/* drain_writebuf */
	cpufunc_nullop,			/* flush_brnchtgt_C */
	(void *)cpufunc_nullop,		/* flush_brnchtgt_E */

	(void *)cpufunc_nullop,		/* sleep */

	/* Soft functions */

	cpufunc_null_fixup,		/* dataabt_fixup */
	cpufunc_null_fixup,		/* prefetchabt_fixup */

	ixp12x0_context_switch,		/* context_switch */

	ixp12x0_setup			/* cpu setup */
};
605 #endif /* CPU_IXP12X0 */
606
607 #if defined(CPU_XSCALE_80200) || defined(CPU_XSCALE_80321) || \
608 defined(CPU_XSCALE_PXA2X0) || defined(CPU_XSCALE_IXP425) || \
609 defined(CPU_XSCALE_80219)
610
/*
 * Dispatch table shared by the non-81342 XScale variants (80200, 80321,
 * 80219, PXA2x0, IXP425).  Uses a real cpwait and a real sleep hook; L2
 * slots are no-ops.
 */
struct cpu_functions xscale_cpufuncs = {
	/* CPU functions */

	cpufunc_id,			/* id */
	xscale_cpwait,			/* cpwait */

	/* MMU functions */

	xscale_control,			/* control */
	cpufunc_domains,		/* domain */
	xscale_setttb,			/* setttb */
	cpufunc_faultstatus,		/* faultstatus */
	cpufunc_faultaddress,		/* faultaddress */

	/* TLB functions */

	armv4_tlb_flushID,		/* tlb_flushID */
	xscale_tlb_flushID_SE,		/* tlb_flushID_SE */
	armv4_tlb_flushI,		/* tlb_flushI */
	(void *)armv4_tlb_flushI,	/* tlb_flushI_SE */
	armv4_tlb_flushD,		/* tlb_flushD */
	armv4_tlb_flushD_SE,		/* tlb_flushD_SE */

	/* Cache operations */

	xscale_cache_syncI,		/* icache_sync_all */
	xscale_cache_syncI_rng,		/* icache_sync_range */

	xscale_cache_purgeD,		/* dcache_wbinv_all */
	xscale_cache_purgeD_rng,	/* dcache_wbinv_range */
	xscale_cache_flushD_rng,	/* dcache_inv_range */
	xscale_cache_cleanD_rng,	/* dcache_wb_range */

	xscale_cache_purgeID,		/* idcache_wbinv_all */
	xscale_cache_purgeID_rng,	/* idcache_wbinv_range */
	cpufunc_nullop,			/* l2cache_wbinv_all */
	(void *)cpufunc_nullop,		/* l2cache_wbinv_range */
	(void *)cpufunc_nullop,		/* l2cache_inv_range */
	(void *)cpufunc_nullop,		/* l2cache_wb_range */

	/* Other functions */

	cpufunc_nullop,			/* flush_prefetchbuf */
	armv4_drain_writebuf,		/* drain_writebuf */
	cpufunc_nullop,			/* flush_brnchtgt_C */
	(void *)cpufunc_nullop,		/* flush_brnchtgt_E */

	xscale_cpu_sleep,		/* sleep */

	/* Soft functions */

	cpufunc_null_fixup,		/* dataabt_fixup */
	cpufunc_null_fixup,		/* prefetchabt_fixup */

	xscale_context_switch,		/* context_switch */

	xscale_setup			/* cpu setup */
};
669 #endif
670 /* CPU_XSCALE_80200 || CPU_XSCALE_80321 || CPU_XSCALE_PXA2X0 || CPU_XSCALE_IXP425
671 CPU_XSCALE_80219 */
672
673 #ifdef CPU_XSCALE_81342
/*
 * Dispatch table for the XScale 81342 ("core 3").  The only table in this
 * file with real L2 cache operations (xscalec3_l2cache_*).
 */
struct cpu_functions xscalec3_cpufuncs = {
	/* CPU functions */

	cpufunc_id,			/* id */
	xscale_cpwait,			/* cpwait */

	/* MMU functions */

	xscale_control,			/* control */
	cpufunc_domains,		/* domain */
	xscalec3_setttb,		/* setttb */
	cpufunc_faultstatus,		/* faultstatus */
	cpufunc_faultaddress,		/* faultaddress */

	/* TLB functions */

	armv4_tlb_flushID,		/* tlb_flushID */
	xscale_tlb_flushID_SE,		/* tlb_flushID_SE */
	armv4_tlb_flushI,		/* tlb_flushI */
	(void *)armv4_tlb_flushI,	/* tlb_flushI_SE */
	armv4_tlb_flushD,		/* tlb_flushD */
	armv4_tlb_flushD_SE,		/* tlb_flushD_SE */

	/* Cache operations */

	xscalec3_cache_syncI,		/* icache_sync_all */
	xscalec3_cache_syncI_rng,	/* icache_sync_range */

	xscalec3_cache_purgeD,		/* dcache_wbinv_all */
	xscalec3_cache_purgeD_rng,	/* dcache_wbinv_range */
	xscale_cache_flushD_rng,	/* dcache_inv_range */
	xscalec3_cache_cleanD_rng,	/* dcache_wb_range */

	xscalec3_cache_purgeID,		/* idcache_wbinv_all */
	xscalec3_cache_purgeID_rng,	/* idcache_wbinv_range */
	xscalec3_l2cache_purge,		/* l2cache_wbinv_all */
	xscalec3_l2cache_purge_rng,	/* l2cache_wbinv_range */
	xscalec3_l2cache_flush_rng,	/* l2cache_inv_range */
	xscalec3_l2cache_clean_rng,	/* l2cache_wb_range */

	/* Other functions */

	cpufunc_nullop,			/* flush_prefetchbuf */
	armv4_drain_writebuf,		/* drain_writebuf */
	cpufunc_nullop,			/* flush_brnchtgt_C */
	(void *)cpufunc_nullop,		/* flush_brnchtgt_E */

	xscale_cpu_sleep,		/* sleep */

	/* Soft functions */

	cpufunc_null_fixup,		/* dataabt_fixup */
	cpufunc_null_fixup,		/* prefetchabt_fixup */

	xscalec3_context_switch,	/* context_switch */

	xscale_setup			/* cpu setup */
};
732 #endif /* CPU_XSCALE_81342 */
733 /*
734 * Global constants also used by locore.s
735 */
736
struct cpu_functions cpufuncs;		/* active dispatch table, copied in set_cpufuncs() */
u_int cputype;				/* CPU id masked with CPU_ID_CPU_MASK, set in set_cpufuncs() */
u_int cpu_reset_needs_v4_MMU_disable;	/* flag used in locore.s */
740
741 #if defined(CPU_ARM7TDMI) || defined(CPU_ARM8) || defined(CPU_ARM9) || \
742 defined (CPU_ARM9E) || defined (CPU_ARM10) || \
743 defined(CPU_XSCALE_80200) || defined(CPU_XSCALE_80321) || \
744 defined(CPU_XSCALE_PXA2X0) || defined(CPU_XSCALE_IXP425) || \
745 defined(CPU_XSCALE_80219) || defined(CPU_XSCALE_81342)
746
static void get_cachetype_cp15(void);

/*
 * Additional cache information local to this file.  Log2 of some of the
 * above numbers.  Filled in by get_cachetype_cp15() and consumed by the
 * ARM9/ARM10 set-index computations in set_cpufuncs().
 */
static int	arm_dcache_l2_nsets;
static int	arm_dcache_l2_assoc;
static int	arm_dcache_l2_linesize;
754
755 static void
756 get_cachetype_cp15()
757 {
758 u_int ctype, isize, dsize;
759 u_int multiplier;
760
761 __asm __volatile("mrc p15, 0, %0, c0, c0, 1"
762 : "=r" (ctype));
763
764 /*
765 * ...and thus spake the ARM ARM:
766 *
767 * If an <opcode2> value corresponding to an unimplemented or
768 * reserved ID register is encountered, the System Control
769 * processor returns the value of the main ID register.
770 */
771 if (ctype == cpufunc_id())
772 goto out;
773
774 if ((ctype & CPU_CT_S) == 0)
775 arm_pcache_unified = 1;
776
777 /*
778 * If you want to know how this code works, go read the ARM ARM.
779 */
780
781 arm_pcache_type = CPU_CT_CTYPE(ctype);
782
783 if (arm_pcache_unified == 0) {
784 isize = CPU_CT_ISIZE(ctype);
785 multiplier = (isize & CPU_CT_xSIZE_M) ? 3 : 2;
786 arm_picache_line_size = 1U << (CPU_CT_xSIZE_LEN(isize) + 3);
787 if (CPU_CT_xSIZE_ASSOC(isize) == 0) {
788 if (isize & CPU_CT_xSIZE_M)
789 arm_picache_line_size = 0; /* not present */
790 else
791 arm_picache_ways = 1;
792 } else {
793 arm_picache_ways = multiplier <<
794 (CPU_CT_xSIZE_ASSOC(isize) - 1);
795 }
796 arm_picache_size = multiplier << (CPU_CT_xSIZE_SIZE(isize) + 8);
797 }
798
799 dsize = CPU_CT_DSIZE(ctype);
800 multiplier = (dsize & CPU_CT_xSIZE_M) ? 3 : 2;
801 arm_pdcache_line_size = 1U << (CPU_CT_xSIZE_LEN(dsize) + 3);
802 if (CPU_CT_xSIZE_ASSOC(dsize) == 0) {
803 if (dsize & CPU_CT_xSIZE_M)
804 arm_pdcache_line_size = 0; /* not present */
805 else
806 arm_pdcache_ways = 1;
807 } else {
808 arm_pdcache_ways = multiplier <<
809 (CPU_CT_xSIZE_ASSOC(dsize) - 1);
810 }
811 arm_pdcache_size = multiplier << (CPU_CT_xSIZE_SIZE(dsize) + 8);
812
813 arm_dcache_align = arm_pdcache_line_size;
814
815 arm_dcache_l2_assoc = CPU_CT_xSIZE_ASSOC(dsize) + multiplier - 2;
816 arm_dcache_l2_linesize = CPU_CT_xSIZE_LEN(dsize) + 3;
817 arm_dcache_l2_nsets = 6 + CPU_CT_xSIZE_SIZE(dsize) -
818 CPU_CT_xSIZE_ASSOC(dsize) - CPU_CT_xSIZE_LEN(dsize);
819
820 out:
821 arm_dcache_align_mask = arm_dcache_align - 1;
822 }
823 #endif /* ARM7TDMI || ARM8 || ARM9 || XSCALE */
824
825 #if defined(CPU_SA110) || defined(CPU_SA1100) || defined(CPU_SA1110) || \
826 defined(CPU_IXP12X0)
827 /* Cache information for CPUs without cache type registers. */
/*
 * Cache information for CPUs without cache type registers; one row per
 * CPU id, matched in get_cachetype_table().
 */
struct cachetab {
	u_int32_t ct_cpuid;		/* CPU id, compared under CPU_ID_CPU_MASK */
	int	ct_pcache_type;		/* primary cache type (CPU_CT_CTYPE_*) */
	int	ct_pcache_unified;	/* non-zero if I and D caches are unified */
	int	ct_pdcache_size;	/* D-cache size */
	int	ct_pdcache_line_size;	/* D-cache line size */
	int	ct_pdcache_ways;	/* D-cache associativity */
	int	ct_picache_size;	/* I-cache size */
	int	ct_picache_line_size;	/* I-cache line size */
	int	ct_picache_ways;	/* I-cache associativity */
};
839
/* Cache geometry table; terminated by the all-zero ct_cpuid sentinel. */
struct cachetab cachetab[] = {
    /* cpuid,		cache type,	  u,  dsiz, ls, wy,  isiz, ls, wy */
    /* XXX is this type right for SA-1? */
    { CPU_ID_SA110,	CPU_CT_CTYPE_WB1, 0, 16384, 32, 32, 16384, 32, 32 },
    { CPU_ID_SA1100,	CPU_CT_CTYPE_WB1, 0,  8192, 32, 32, 16384, 32, 32 },
    { CPU_ID_SA1110,	CPU_CT_CTYPE_WB1, 0,  8192, 32, 32, 16384, 32, 32 },
    { CPU_ID_IXP1200,	CPU_CT_CTYPE_WB1, 0, 16384, 32, 32, 16384, 32, 32 }, /* XXX */
    { 0, 0, 0, 0, 0, 0, 0, 0}
};
849
850 static void get_cachetype_table(void);
851
852 static void
853 get_cachetype_table()
854 {
855 int i;
856 u_int32_t cpuid = cpufunc_id();
857
858 for (i = 0; cachetab[i].ct_cpuid != 0; i++) {
859 if (cachetab[i].ct_cpuid == (cpuid & CPU_ID_CPU_MASK)) {
860 arm_pcache_type = cachetab[i].ct_pcache_type;
861 arm_pcache_unified = cachetab[i].ct_pcache_unified;
862 arm_pdcache_size = cachetab[i].ct_pdcache_size;
863 arm_pdcache_line_size =
864 cachetab[i].ct_pdcache_line_size;
865 arm_pdcache_ways = cachetab[i].ct_pdcache_ways;
866 arm_picache_size = cachetab[i].ct_picache_size;
867 arm_picache_line_size =
868 cachetab[i].ct_picache_line_size;
869 arm_picache_ways = cachetab[i].ct_picache_ways;
870 }
871 }
872 arm_dcache_align = arm_pdcache_line_size;
873
874 arm_dcache_align_mask = arm_dcache_align - 1;
875 }
876
877 #endif /* SA110 || SA1100 || SA1111 || IXP12X0 */
878
879 /*
880 * Cannot panic here as we may not have a console yet ...
881 */
882
883 int
884 set_cpufuncs()
885 {
886 cputype = cpufunc_id();
887 cputype &= CPU_ID_CPU_MASK;
888
889 /*
890 * NOTE: cpu_do_powersave defaults to off. If we encounter a
891 * CPU type where we want to use it by default, then we set it.
892 */
893
894 #ifdef CPU_ARM7TDMI
895 if ((cputype & CPU_ID_IMPLEMENTOR_MASK) == CPU_ID_ARM_LTD &&
896 CPU_ID_IS7(cputype) &&
897 (cputype & CPU_ID_7ARCH_MASK) == CPU_ID_7ARCH_V4T) {
898 cpufuncs = arm7tdmi_cpufuncs;
899 cpu_reset_needs_v4_MMU_disable = 0;
900 get_cachetype_cp15();
901 pmap_pte_init_generic();
902 goto out;
903 }
904 #endif
905 #ifdef CPU_ARM8
906 if ((cputype & CPU_ID_IMPLEMENTOR_MASK) == CPU_ID_ARM_LTD &&
907 (cputype & 0x0000f000) == 0x00008000) {
908 cpufuncs = arm8_cpufuncs;
909 cpu_reset_needs_v4_MMU_disable = 0; /* XXX correct? */
910 get_cachetype_cp15();
911 pmap_pte_init_arm8();
912 goto out;
913 }
914 #endif /* CPU_ARM8 */
915 #ifdef CPU_ARM9
916 if (((cputype & CPU_ID_IMPLEMENTOR_MASK) == CPU_ID_ARM_LTD ||
917 (cputype & CPU_ID_IMPLEMENTOR_MASK) == CPU_ID_TI) &&
918 (cputype & 0x0000f000) == 0x00009000) {
919 cpufuncs = arm9_cpufuncs;
920 cpu_reset_needs_v4_MMU_disable = 1; /* V4 or higher */
921 get_cachetype_cp15();
922 arm9_dcache_sets_inc = 1U << arm_dcache_l2_linesize;
923 arm9_dcache_sets_max = (1U << (arm_dcache_l2_linesize +
924 arm_dcache_l2_nsets)) - arm9_dcache_sets_inc;
925 arm9_dcache_index_inc = 1U << (32 - arm_dcache_l2_assoc);
926 arm9_dcache_index_max = 0U - arm9_dcache_index_inc;
927 #ifdef ARM9_CACHE_WRITE_THROUGH
928 pmap_pte_init_arm9();
929 #else
930 pmap_pte_init_generic();
931 #endif
932 goto out;
933 }
934 #endif /* CPU_ARM9 */
935 #if defined(CPU_ARM9E) || defined(CPU_ARM10)
936 if (cputype == CPU_ID_ARM926EJS ||
937 cputype == CPU_ID_ARM1026EJS) {
938 cpufuncs = armv5_ec_cpufuncs;
939 cpu_reset_needs_v4_MMU_disable = 1; /* V4 or higher */
940 get_cachetype_cp15();
941 pmap_pte_init_generic();
942 goto out;
943 }
944 #endif /* CPU_ARM9E || CPU_ARM10 */
945 #ifdef CPU_ARM10
946 if (/* cputype == CPU_ID_ARM1020T || */
947 cputype == CPU_ID_ARM1020E) {
948 /*
949 * Select write-through cacheing (this isn't really an
950 * option on ARM1020T).
951 */
952 cpufuncs = arm10_cpufuncs;
953 cpu_reset_needs_v4_MMU_disable = 1; /* V4 or higher */
954 get_cachetype_cp15();
955 arm10_dcache_sets_inc = 1U << arm_dcache_l2_linesize;
956 arm10_dcache_sets_max =
957 (1U << (arm_dcache_l2_linesize + arm_dcache_l2_nsets)) -
958 arm10_dcache_sets_inc;
959 arm10_dcache_index_inc = 1U << (32 - arm_dcache_l2_assoc);
960 arm10_dcache_index_max = 0U - arm10_dcache_index_inc;
961 pmap_pte_init_generic();
962 goto out;
963 }
964 #endif /* CPU_ARM10 */
965 #ifdef CPU_SA110
966 if (cputype == CPU_ID_SA110) {
967 cpufuncs = sa110_cpufuncs;
968 cpu_reset_needs_v4_MMU_disable = 1; /* SA needs it */
969 get_cachetype_table();
970 pmap_pte_init_sa1();
971 goto out;
972 }
973 #endif /* CPU_SA110 */
974 #ifdef CPU_SA1100
975 if (cputype == CPU_ID_SA1100) {
976 cpufuncs = sa11x0_cpufuncs;
977 cpu_reset_needs_v4_MMU_disable = 1; /* SA needs it */
978 get_cachetype_table();
979 pmap_pte_init_sa1();
980 /* Use powersave on this CPU. */
981 cpu_do_powersave = 1;
982
983 goto out;
984 }
985 #endif /* CPU_SA1100 */
986 #ifdef CPU_SA1110
987 if (cputype == CPU_ID_SA1110) {
988 cpufuncs = sa11x0_cpufuncs;
989 cpu_reset_needs_v4_MMU_disable = 1; /* SA needs it */
990 get_cachetype_table();
991 pmap_pte_init_sa1();
992 /* Use powersave on this CPU. */
993 cpu_do_powersave = 1;
994
995 goto out;
996 }
997 #endif /* CPU_SA1110 */
998 #ifdef CPU_IXP12X0
999 if (cputype == CPU_ID_IXP1200) {
1000 cpufuncs = ixp12x0_cpufuncs;
1001 cpu_reset_needs_v4_MMU_disable = 1;
1002 get_cachetype_table();
1003 pmap_pte_init_sa1();
1004 goto out;
1005 }
1006 #endif /* CPU_IXP12X0 */
1007 #ifdef CPU_XSCALE_80200
1008 if (cputype == CPU_ID_80200) {
1009 int rev = cpufunc_id() & CPU_ID_REVISION_MASK;
1010
1011 i80200_icu_init();
1012
1013 /*
1014 * Reset the Performance Monitoring Unit to a
1015 * pristine state:
1016 * - CCNT, PMN0, PMN1 reset to 0
1017 * - overflow indications cleared
1018 * - all counters disabled
1019 */
1020 __asm __volatile("mcr p14, 0, %0, c0, c0, 0"
1021 :
1022 : "r" (PMNC_P|PMNC_C|PMNC_PMN0_IF|PMNC_PMN1_IF|
1023 PMNC_CC_IF));
1024
1025 #if defined(XSCALE_CCLKCFG)
1026 /*
1027 * Crank CCLKCFG to maximum legal value.
1028 */
1029 __asm __volatile ("mcr p14, 0, %0, c6, c0, 0"
1030 :
1031 : "r" (XSCALE_CCLKCFG));
1032 #endif
1033
1034 /*
1035 * XXX Disable ECC in the Bus Controller Unit; we
1036 * don't really support it, yet. Clear any pending
1037 * error indications.
1038 */
1039 __asm __volatile("mcr p13, 0, %0, c0, c1, 0"
1040 :
1041 : "r" (BCUCTL_E0|BCUCTL_E1|BCUCTL_EV));
1042
1043 cpufuncs = xscale_cpufuncs;
1044 #if defined(PERFCTRS)
1045 xscale_pmu_init();
1046 #endif
1047
1048 /*
1049 * i80200 errata: Step-A0 and A1 have a bug where
1050 * D$ dirty bits are not cleared on "invalidate by
1051 * address".
1052 *
1053 * Workaround: Clean cache line before invalidating.
1054 */
1055 if (rev == 0 || rev == 1)
1056 cpufuncs.cf_dcache_inv_range = xscale_cache_purgeD_rng;
1057
1058 cpu_reset_needs_v4_MMU_disable = 1; /* XScale needs it */
1059 get_cachetype_cp15();
1060 pmap_pte_init_xscale();
1061 goto out;
1062 }
1063 #endif /* CPU_XSCALE_80200 */
1064 #if defined(CPU_XSCALE_80321) || defined(CPU_XSCALE_80219)
1065 if (cputype == CPU_ID_80321_400 || cputype == CPU_ID_80321_600 ||
1066 cputype == CPU_ID_80321_400_B0 || cputype == CPU_ID_80321_600_B0 ||
1067 cputype == CPU_ID_80219_400 || cputype == CPU_ID_80219_600) {
1068 /*
1069 * Reset the Performance Monitoring Unit to a
1070 * pristine state:
1071 * - CCNT, PMN0, PMN1 reset to 0
1072 * - overflow indications cleared
1073 * - all counters disabled
1074 */
1075 __asm __volatile("mcr p14, 0, %0, c0, c0, 0"
1076 :
1077 : "r" (PMNC_P|PMNC_C|PMNC_PMN0_IF|PMNC_PMN1_IF|
1078 PMNC_CC_IF));
1079
1080 cpufuncs = xscale_cpufuncs;
1081 #if defined(PERFCTRS)
1082 xscale_pmu_init();
1083 #endif
1084
1085 cpu_reset_needs_v4_MMU_disable = 1; /* XScale needs it */
1086 get_cachetype_cp15();
1087 pmap_pte_init_xscale();
1088 goto out;
1089 }
1090 #endif /* CPU_XSCALE_80321 */
1091
1092 #if defined(CPU_XSCALE_81342)
1093 if (cputype == CPU_ID_81342) {
1094 cpufuncs = xscalec3_cpufuncs;
1095 #if defined(PERFCTRS)
1096 xscale_pmu_init();
1097 #endif
1098
1099 cpu_reset_needs_v4_MMU_disable = 1; /* XScale needs it */
1100 get_cachetype_cp15();
1101 pmap_pte_init_xscale();
1102 goto out;
1103 }
1104 #endif /* CPU_XSCALE_81342 */
1105 #ifdef CPU_XSCALE_PXA2X0
1106 /* ignore core revision to test PXA2xx CPUs */
1107 if ((cputype & ~CPU_ID_XSCALE_COREREV_MASK) == CPU_ID_PXA250 ||
1108 (cputype & ~CPU_ID_XSCALE_COREREV_MASK) == CPU_ID_PXA210) {
1109
1110 cpufuncs = xscale_cpufuncs;
1111 #if defined(PERFCTRS)
1112 xscale_pmu_init();
1113 #endif
1114
1115 cpu_reset_needs_v4_MMU_disable = 1; /* XScale needs it */
1116 get_cachetype_cp15();
1117 pmap_pte_init_xscale();
1118
1119 /* Use powersave on this CPU. */
1120 cpu_do_powersave = 1;
1121
1122 goto out;
1123 }
1124 #endif /* CPU_XSCALE_PXA2X0 */
1125 #ifdef CPU_XSCALE_IXP425
1126 if (cputype == CPU_ID_IXP425_533 || cputype == CPU_ID_IXP425_400 ||
1127 cputype == CPU_ID_IXP425_266) {
1128
1129 cpufuncs = xscale_cpufuncs;
1130 #if defined(PERFCTRS)
1131 xscale_pmu_init();
1132 #endif
1133
1134 cpu_reset_needs_v4_MMU_disable = 1; /* XScale needs it */
1135 get_cachetype_cp15();
1136 pmap_pte_init_xscale();
1137
1138 goto out;
1139 }
1140 #endif /* CPU_XSCALE_IXP425 */
1141 /*
1142 * Bzzzz. And the answer was ...
1143 */
1144 panic("No support for this CPU type (%08x) in kernel", cputype);
1145 return(ARCHITECTURE_NOT_PRESENT);
1146 out:
1147 uma_set_align(arm_dcache_align_mask);
1148 return (0);
1149 }
1150
1151 /*
1152 * Fixup routines for data and prefetch aborts.
1153 *
1154 * Several compile time symbols are used
1155 *
1156 * DEBUG_FAULT_CORRECTION - Print debugging information during the
1157 * correction of registers after a fault.
1158 * ARM6_LATE_ABORT - ARM6 supports both early and late aborts
1159 * when defined should use late aborts
1160 */
1161
1162
1163 /*
1164 * Null abort fixup routine.
1165 * For use when no fixup is required.
1166 */
int
cpufunc_null_fixup(arg)
	void *arg;
{
	/* No post-abort register fixup is needed on this CPU: succeed. */
	return(ABORT_FIXUP_OK);
}
1173
1174
1175 #if defined(CPU_ARM7TDMI)
1176
1177 #ifdef DEBUG_FAULT_CORRECTION
1178 #define DFC_PRINTF(x) printf x
1179 #define DFC_DISASSEMBLE(x) disassemble(x)
1180 #else
1181 #define DFC_PRINTF(x) /* nothing */
1182 #define DFC_DISASSEMBLE(x) /* nothing */
1183 #endif
1184
1185 /*
1186 * "Early" data abort fixup.
1187 *
1188 * For ARM2, ARM2as, ARM3 and ARM6 (in early-abort mode). Also used
1189 * indirectly by ARM6 (in late-abort mode) and ARM7[TDMI].
1190 *
1191 * In early aborts, we may have to fix up LDM, STM, LDC and STC.
1192 */
int
early_abort_fixup(arg)
	void *arg;
{
	trapframe_t *frame = arg;
	u_int fault_pc;		/* PC of the faulting instruction */
	u_int fault_instruction; /* the instruction word itself */
	int saved_lr = 0;

	if ((frame->tf_spsr & PSR_MODE) == PSR_SVC32_MODE) {

		/* Ok an abort in SVC mode */

		/*
		 * Copy the SVC r14 into the usr r14 - The usr r14 is garbage
		 * as the fault happened in svc mode but we need it in the
		 * usr slot so we can treat the registers as an array of ints
		 * during fixing.
		 * NOTE: This PC is in the position but writeback is not
		 * allowed on r15.
		 * Doing it like this is more efficient than trapping this
		 * case in all possible locations in the following fixup code.
		 */

		saved_lr = frame->tf_usr_lr;
		frame->tf_usr_lr = frame->tf_svc_lr;

		/*
		 * Note the trapframe does not have the SVC r13 so a fault
		 * from an instruction with writeback to r13 in SVC mode is
		 * not allowed. This should not happen as the kstack is
		 * always valid.
		 */
	}

	/* Get fault address and status from the CPU */

	fault_pc = frame->tf_pc;
	fault_instruction = *((volatile unsigned int *)fault_pc);

	/* Decode the fault instruction and fix the registers as needed */

	/* Block data transfer (LDM/STM): opcode bits 27:25 == 100. */
	if ((fault_instruction & 0x0e000000) == 0x08000000) {
		int base;
		int loop;
		int count;
		int *registers = &frame->tf_r0;

		DFC_PRINTF(("LDM/STM\n"));
		DFC_DISASSEMBLE(fault_pc);
		/* Bit 21 is the W (writeback) bit; only then was the base
		 * register modified and in need of undoing. */
		if (fault_instruction & (1 << 21)) {
			DFC_PRINTF(("This instruction must be corrected\n"));
			base = (fault_instruction >> 16) & 0x0f;
			/* Cannot undo a writeback to the PC. */
			if (base == 15)
				return ABORT_FIXUP_FAILED;
			/* Count registers transferred */
			count = 0;
			for (loop = 0; loop < 16; ++loop) {
				if (fault_instruction & (1<<loop))
					++count;
			}
			DFC_PRINTF(("%d registers used\n", count));
			DFC_PRINTF(("Corrected r%d by %d bytes ",
				       base, count * 4));
			/* Bit 23 is the U (up/down) bit: step the base back
			 * in the direction opposite to the transfer. */
			if (fault_instruction & (1 << 23)) {
				DFC_PRINTF(("down\n"));
				registers[base] -= count * 4;
			} else {
				DFC_PRINTF(("up\n"));
				registers[base] += count * 4;
			}
		}
	} else if ((fault_instruction & 0x0e000000) == 0x0c000000) {
		/* Coprocessor data transfer (LDC/STC). */
		int base;
		int offset;
		int *registers = &frame->tf_r0;

		/* REGISTER CORRECTION IS REQUIRED FOR THESE INSTRUCTIONS */

		DFC_DISASSEMBLE(fault_pc);

		/* Only need to fix registers if write back is turned on */

		if ((fault_instruction & (1 << 21)) != 0) {
			base = (fault_instruction >> 16) & 0x0f;
			if (base == 13 &&
			    (frame->tf_spsr & PSR_MODE) == PSR_SVC32_MODE)
				return ABORT_FIXUP_FAILED;
			if (base == 15)
				return ABORT_FIXUP_FAILED;

			/* 8-bit word offset, scaled to bytes. */
			offset = (fault_instruction & 0xff) << 2;
			DFC_PRINTF(("r%d=%08x\n", base, registers[base]));
			if ((fault_instruction & (1 << 23)) != 0)
				offset = -offset;
			registers[base] += offset;
			DFC_PRINTF(("r%d=%08x\n", base, registers[base]));
		}
	} else if ((fault_instruction & 0x0e000000) == 0x0c000000)
		/*
		 * NOTE(review): this condition duplicates the previous
		 * branch's test exactly, so this return is unreachable
		 * dead code -- confirm the intended mask/value before
		 * removing or changing it.
		 */
		return ABORT_FIXUP_FAILED;

	if ((frame->tf_spsr & PSR_MODE) == PSR_SVC32_MODE) {

		/* Ok an abort in SVC mode */

		/*
		 * Copy the SVC r14 into the usr r14 - The usr r14 is garbage
		 * as the fault happened in svc mode but we need it in the
		 * usr slot so we can treat the registers as an array of ints
		 * during fixing.
		 * NOTE: This PC is in the position but writeback is not
		 * allowed on r15.
		 * Doing it like this is more efficient than trapping this
		 * case in all possible locations in the prior fixup code.
		 */

		frame->tf_svc_lr = frame->tf_usr_lr;
		frame->tf_usr_lr = saved_lr;

		/*
		 * Note the trapframe does not have the SVC r13 so a fault
		 * from an instruction with writeback to r13 in SVC mode is
		 * not allowed. This should not happen as the kstack is
		 * always valid.
		 */
	}

	return(ABORT_FIXUP_OK);
}
1322 #endif /* CPU_ARM2/250/3/6/7 */
1323
1324
1325 #if defined(CPU_ARM7TDMI)
1326 /*
1327 * "Late" (base updated) data abort fixup
1328 *
1329 * For ARM6 (in late-abort mode) and ARM7.
1330 *
1331 * In this model, all data-transfer instructions need fixing up. We defer
1332 * LDM, STM, LDC and STC fixup to the early-abort handler.
1333 */
int
late_abort_fixup(arg)
	void *arg;
{
	trapframe_t *frame = arg;
	u_int fault_pc;		/* PC of the faulting instruction */
	u_int fault_instruction; /* the instruction word itself */
	int saved_lr = 0;

	if ((frame->tf_spsr & PSR_MODE) == PSR_SVC32_MODE) {

		/* Ok an abort in SVC mode */

		/*
		 * Copy the SVC r14 into the usr r14 - The usr r14 is garbage
		 * as the fault happened in svc mode but we need it in the
		 * usr slot so we can treat the registers as an array of ints
		 * during fixing.
		 * NOTE: This PC is in the position but writeback is not
		 * allowed on r15.
		 * Doing it like this is more efficient than trapping this
		 * case in all possible locations in the following fixup code.
		 */

		saved_lr = frame->tf_usr_lr;
		frame->tf_usr_lr = frame->tf_svc_lr;

		/*
		 * Note the trapframe does not have the SVC r13 so a fault
		 * from an instruction with writeback to r13 in SVC mode is
		 * not allowed. This should not happen as the kstack is
		 * always valid.
		 */
	}

	/* Get fault address and status from the CPU */

	fault_pc = frame->tf_pc;
	fault_instruction = *((volatile unsigned int *)fault_pc);

	/* Decode the fault instruction and fix the registers as needed */

	/* Was is a swap instruction ? */

	if ((fault_instruction & 0x0fb00ff0) == 0x01000090) {
		/* SWP/SWPB does not update a base register: nothing to fix. */
		DFC_DISASSEMBLE(fault_pc);
	} else if ((fault_instruction & 0x0c000000) == 0x04000000) {

		/* Was is a ldr/str instruction */
		/* This is for late abort only */

		int base;
		int offset;
		int *registers = &frame->tf_r0;

		DFC_DISASSEMBLE(fault_pc);

		/* This is for late abort only */

		/*
		 * Bit 24 clear = post-indexed (base always updated);
		 * bit 21 set = writeback requested.  Either way the base
		 * register was modified and must be rolled back.
		 */
		if ((fault_instruction & (1 << 24)) == 0
		    || (fault_instruction & (1 << 21)) != 0) {
			/* postindexed ldr/str with no writeback */

			base = (fault_instruction >> 16) & 0x0f;
			if (base == 13 &&
			    (frame->tf_spsr & PSR_MODE) == PSR_SVC32_MODE)
				return ABORT_FIXUP_FAILED;
			if (base == 15)
				return ABORT_FIXUP_FAILED;
			DFC_PRINTF(("late abt fix: r%d=%08x : ",
				       base, registers[base]));
			if ((fault_instruction & (1 << 25)) == 0) {
				/* Immediate offset - easy */

				offset = fault_instruction & 0xfff;
				/* Bit 23 is the U bit: undo an add by
				 * subtracting and vice versa. */
				if ((fault_instruction & (1 << 23)))
					offset = -offset;
				registers[base] += offset;
				DFC_PRINTF(("imm=%08x ", offset));
			} else {
				/* offset is a shifted register */
				int shift;

				offset = fault_instruction & 0x0f;
				if (offset == base)
					return ABORT_FIXUP_FAILED;

				/*
				 * Register offset - hard we have to
				 * cope with shifts !
				 */
				offset = registers[offset];

				if ((fault_instruction & (1 << 4)) == 0)
					/* shift with amount */
					shift = (fault_instruction >> 7) & 0x1f;
				else {
					/* shift with register */
					if ((fault_instruction & (1 << 7)) != 0)
						/* undefined for now so bail out */
						return ABORT_FIXUP_FAILED;
					shift = ((fault_instruction >> 8) & 0xf);
					if (base == shift)
						return ABORT_FIXUP_FAILED;
					DFC_PRINTF(("shift reg=%d ", shift));
					shift = registers[shift];
				}
				DFC_PRINTF(("shift=%08x ", shift));
				/*
				 * NOTE(review): for LSR/ASR an encoded shift
				 * amount of 0 means 32, but a C shift by 32
				 * of a 32-bit operand is undefined behavior;
				 * confirm the intended result (0 for LSR,
				 * sign fill for ASR) is what the compiler
				 * produces here.
				 */
				switch (((fault_instruction >> 5) & 0x3)) {
				case 0 : /* Logical left */
					offset = (int)(((u_int)offset) << shift);
					break;
				case 1 : /* Logical Right */
					if (shift == 0) shift = 32;
					offset = (int)(((u_int)offset) >> shift);
					break;
				case 2 : /* Arithmetic Right */
					if (shift == 0) shift = 32;
					offset = (int)(((int)offset) >> shift);
					break;
				case 3 : /* Rotate right (rol or rxx) */
					return ABORT_FIXUP_FAILED;
					break;
				}

				DFC_PRINTF(("abt: fixed LDR/STR with "
					   "register offset\n"));
				if ((fault_instruction & (1 << 23)))
					offset = -offset;
				DFC_PRINTF(("offset=%08x ", offset));
				registers[base] += offset;
			}
			DFC_PRINTF(("r%d=%08x\n", base, registers[base]));
		}
	}

	if ((frame->tf_spsr & PSR_MODE) == PSR_SVC32_MODE) {

		/* Ok an abort in SVC mode */

		/*
		 * Copy the SVC r14 into the usr r14 - The usr r14 is garbage
		 * as the fault happened in svc mode but we need it in the
		 * usr slot so we can treat the registers as an array of ints
		 * during fixing.
		 * NOTE: This PC is in the position but writeback is not
		 * allowed on r15.
		 * Doing it like this is more efficient than trapping this
		 * case in all possible locations in the prior fixup code.
		 */

		frame->tf_svc_lr = frame->tf_usr_lr;
		frame->tf_usr_lr = saved_lr;

		/*
		 * Note the trapframe does not have the SVC r13 so a fault
		 * from an instruction with writeback to r13 in SVC mode is
		 * not allowed. This should not happen as the kstack is
		 * always valid.
		 */
	}

	/*
	 * Now let the early-abort fixup routine have a go, in case it
	 * was an LDM, STM, LDC or STC that faulted.
	 */

	return early_abort_fixup(arg);
}
1503 #endif /* CPU_ARM7TDMI */
1504
1505 /*
1506 * CPU Setup code
1507 */
1508
1509 #if defined(CPU_ARM7TDMI) || defined(CPU_ARM8) || defined (CPU_ARM9) || \
1510 defined(CPU_ARM9E) || \
1511 defined(CPU_SA110) || defined(CPU_SA1100) || defined(CPU_SA1110) || \
1512 defined(CPU_XSCALE_80200) || defined(CPU_XSCALE_80321) || \
1513 defined(CPU_XSCALE_PXA2X0) || defined(CPU_XSCALE_IXP425) || \
1514 defined(CPU_XSCALE_80219) || defined(CPU_XSCALE_81342) || \
1515 defined(CPU_ARM10) || defined(CPU_ARM11)
1516
/*
 * Actions for the co_falseop/co_trueop fields below: what to do to the
 * control-register value when a boot option is false or true.
 */
#define IGN 0	/* leave the bits alone */
#define OR 1	/* set co_value bits */
#define BIC 2	/* clear co_value bits */

/* One boot-argument name and the control-register bits it governs. */
struct cpu_option {
	char *co_name;	/* boot argument name to look up */
	int co_falseop;	/* action when the option is false */
	int co_trueop;	/* action when the option is true */
	int co_value;	/* CPU_CONTROL_* bits affected */
};

static u_int parse_cpu_options(char *, struct cpu_option *, u_int);
1529
1530 static u_int
1531 parse_cpu_options(args, optlist, cpuctrl)
1532 char *args;
1533 struct cpu_option *optlist;
1534 u_int cpuctrl;
1535 {
1536 int integer;
1537
1538 if (args == NULL)
1539 return(cpuctrl);
1540
1541 while (optlist->co_name) {
1542 if (get_bootconf_option(args, optlist->co_name,
1543 BOOTOPT_TYPE_BOOLEAN, &integer)) {
1544 if (integer) {
1545 if (optlist->co_trueop == OR)
1546 cpuctrl |= optlist->co_value;
1547 else if (optlist->co_trueop == BIC)
1548 cpuctrl &= ~optlist->co_value;
1549 } else {
1550 if (optlist->co_falseop == OR)
1551 cpuctrl |= optlist->co_value;
1552 else if (optlist->co_falseop == BIC)
1553 cpuctrl &= ~optlist->co_value;
1554 }
1555 }
1556 ++optlist;
1557 }
1558 return(cpuctrl);
1559 }
1560 #endif /* CPU_ARM7TDMI || CPU_ARM8 || CPU_SA110 || XSCALE*/
1561
1562 #if defined(CPU_ARM7TDMI) || defined(CPU_ARM8)
/*
 * Boot-argument overrides shared by the ARM6/7/8 family: unified cache
 * (IDC) and write-buffer enables.
 */
struct cpu_option arm678_options[] = {
#ifdef COMPAT_12
	{ "nocache", IGN, BIC, CPU_CONTROL_IDC_ENABLE },
	{ "nowritebuf", IGN, BIC, CPU_CONTROL_WBUF_ENABLE },
#endif	/* COMPAT_12 */
	{ "cpu.cache", BIC, OR, CPU_CONTROL_IDC_ENABLE },
	{ "cpu.nocache", OR, BIC, CPU_CONTROL_IDC_ENABLE },
	{ "cpu.writebuf", BIC, OR, CPU_CONTROL_WBUF_ENABLE },
	{ "cpu.nowritebuf", OR, BIC, CPU_CONTROL_WBUF_ENABLE },
	{ NULL, IGN, IGN, 0 }
};
1574
1575 #endif /* CPU_ARM6 || CPU_ARM7 || CPU_ARM7TDMI || CPU_ARM8 */
1576
1577 #ifdef CPU_ARM7TDMI
/*
 * ARM7TDMI-specific boot-argument overrides: cache, write buffer and
 * the FPA coprocessor clock (CPCLK).
 */
struct cpu_option arm7tdmi_options[] = {
	{ "arm7.cache", BIC, OR, CPU_CONTROL_IDC_ENABLE },
	{ "arm7.nocache", OR, BIC, CPU_CONTROL_IDC_ENABLE },
	{ "arm7.writebuf", BIC, OR, CPU_CONTROL_WBUF_ENABLE },
	{ "arm7.nowritebuf", OR, BIC, CPU_CONTROL_WBUF_ENABLE },
#ifdef COMPAT_12
	{ "fpaclk2", BIC, OR, CPU_CONTROL_CPCLK },
#endif	/* COMPAT_12 */
	{ "arm700.fpaclk", BIC, OR, CPU_CONTROL_CPCLK },
	{ NULL, IGN, IGN, 0 }
};
1589
1590 void
1591 arm7tdmi_setup(args)
1592 char *args;
1593 {
1594 int cpuctrl;
1595
1596 cpuctrl = CPU_CONTROL_MMU_ENABLE | CPU_CONTROL_32BP_ENABLE
1597 | CPU_CONTROL_32BD_ENABLE | CPU_CONTROL_SYST_ENABLE
1598 | CPU_CONTROL_IDC_ENABLE | CPU_CONTROL_WBUF_ENABLE;
1599
1600 cpuctrl = parse_cpu_options(args, arm678_options, cpuctrl);
1601 cpuctrl = parse_cpu_options(args, arm7tdmi_options, cpuctrl);
1602
1603 #ifdef __ARMEB__
1604 cpuctrl |= CPU_CONTROL_BEND_ENABLE;
1605 #endif
1606
1607 /* Clear out the cache */
1608 cpu_idcache_wbinv_all();
1609
1610 /* Set the control register */
1611 ctrl = cpuctrl;
1612 cpu_control(0xffffffff, cpuctrl);
1613 }
1614 #endif /* CPU_ARM7TDMI */
1615
1616 #ifdef CPU_ARM8
/*
 * ARM8-specific boot-argument overrides: cache, write buffer and
 * branch prediction.
 */
struct cpu_option arm8_options[] = {
	{ "arm8.cache", BIC, OR, CPU_CONTROL_IDC_ENABLE },
	{ "arm8.nocache", OR, BIC, CPU_CONTROL_IDC_ENABLE },
	{ "arm8.writebuf", BIC, OR, CPU_CONTROL_WBUF_ENABLE },
	{ "arm8.nowritebuf", OR, BIC, CPU_CONTROL_WBUF_ENABLE },
#ifdef COMPAT_12
	{ "branchpredict", BIC, OR, CPU_CONTROL_BPRD_ENABLE },
#endif	/* COMPAT_12 */
	{ "cpu.branchpredict", BIC, OR, CPU_CONTROL_BPRD_ENABLE },
	{ "arm8.branchpredict", BIC, OR, CPU_CONTROL_BPRD_ENABLE },
	{ NULL, IGN, IGN, 0 }
};
1629
/*
 * Configure the ARM8 control register, and optionally its clock/test
 * configuration register, from defaults plus boot arguments.
 */
void
arm8_setup(args)
	char *args;
{
	int integer;
	int cpuctrl, cpuctrlmask;
	int clocktest;		/* new clock/test register value */
	int setclock = 0;	/* nonzero when clocktest must be written */

	cpuctrl = CPU_CONTROL_MMU_ENABLE | CPU_CONTROL_32BP_ENABLE
		 | CPU_CONTROL_32BD_ENABLE | CPU_CONTROL_SYST_ENABLE
		 | CPU_CONTROL_IDC_ENABLE | CPU_CONTROL_WBUF_ENABLE;
	cpuctrlmask = CPU_CONTROL_MMU_ENABLE | CPU_CONTROL_32BP_ENABLE
		 | CPU_CONTROL_32BD_ENABLE | CPU_CONTROL_SYST_ENABLE
		 | CPU_CONTROL_IDC_ENABLE | CPU_CONTROL_WBUF_ENABLE
		 | CPU_CONTROL_BPRD_ENABLE | CPU_CONTROL_ROM_ENABLE
		 | CPU_CONTROL_BEND_ENABLE | CPU_CONTROL_AFLT_ENABLE;

#ifndef ARM32_DISABLE_ALIGNMENT_FAULTS
	cpuctrl |= CPU_CONTROL_AFLT_ENABLE;
#endif

	cpuctrl = parse_cpu_options(args, arm678_options, cpuctrl);
	cpuctrl = parse_cpu_options(args, arm8_options, cpuctrl);

#ifdef __ARMEB__
	cpuctrl |= CPU_CONTROL_BEND_ENABLE;
#endif

	/* Get clock configuration */
	clocktest = arm8_clock_config(0, 0) & 0x0f;

	/* Special ARM8 clock and test configuration */
	if (get_bootconf_option(args, "arm8.clock.reset", BOOTOPT_TYPE_BOOLEAN, &integer)) {
		clocktest = 0;
		setclock = 1;
	}
	if (get_bootconf_option(args, "arm8.clock.dynamic", BOOTOPT_TYPE_BOOLEAN, &integer)) {
		if (integer)
			clocktest |= 0x01;
		else
			clocktest &= ~(0x01);
		setclock = 1;
	}
	if (get_bootconf_option(args, "arm8.clock.sync", BOOTOPT_TYPE_BOOLEAN, &integer)) {
		if (integer)
			clocktest |= 0x02;
		else
			clocktest &= ~(0x02);
		setclock = 1;
	}
	if (get_bootconf_option(args, "arm8.clock.fast", BOOTOPT_TYPE_BININT, &integer)) {
		/*
		 * NOTE(review): the mask clears bits 6-7 (0xc0) but the new
		 * value is placed in bits 2-3 ((integer & 3) << 2); confirm
		 * whether the mask was meant to be ~0x0c.
		 */
		clocktest = (clocktest & ~0xc0) | (integer & 3) << 2;
		setclock = 1;
	}
	if (get_bootconf_option(args, "arm8.test", BOOTOPT_TYPE_BININT, &integer)) {
		clocktest |= (integer & 7) << 5;
		setclock = 1;
	}

	/* Clear out the cache */
	cpu_idcache_wbinv_all();

	/* Set the control register */
	ctrl = cpuctrl;
	cpu_control(0xffffffff, cpuctrl);

	/* Set the clock/test register */
	if (setclock)
		arm8_clock_config(0x7f, clocktest);
}
1701 #endif /* CPU_ARM8 */
1702
1703 #ifdef CPU_ARM9
/*
 * ARM9 boot-argument overrides: separate I/D cache enables and the
 * write buffer.
 */
struct cpu_option arm9_options[] = {
	{ "cpu.cache", BIC, OR, (CPU_CONTROL_IC_ENABLE | CPU_CONTROL_DC_ENABLE) },
	{ "cpu.nocache", OR, BIC, (CPU_CONTROL_IC_ENABLE | CPU_CONTROL_DC_ENABLE) },
	{ "arm9.cache", BIC, OR, (CPU_CONTROL_IC_ENABLE | CPU_CONTROL_DC_ENABLE) },
	{ "arm9.icache", BIC, OR, CPU_CONTROL_IC_ENABLE },
	{ "arm9.dcache", BIC, OR, CPU_CONTROL_DC_ENABLE },
	{ "cpu.writebuf", BIC, OR, CPU_CONTROL_WBUF_ENABLE },
	{ "cpu.nowritebuf", OR, BIC, CPU_CONTROL_WBUF_ENABLE },
	{ "arm9.writebuf", BIC, OR, CPU_CONTROL_WBUF_ENABLE },
	{ NULL, IGN, IGN, 0 }
};
1715
/*
 * Configure the ARM9 control register from the default bit set plus
 * boot-argument overrides.
 */
void
arm9_setup(args)
	char *args;
{
	int cpuctrl, cpuctrlmask;

	cpuctrl = CPU_CONTROL_MMU_ENABLE | CPU_CONTROL_32BP_ENABLE
	    | CPU_CONTROL_32BD_ENABLE | CPU_CONTROL_SYST_ENABLE
	    | CPU_CONTROL_IC_ENABLE | CPU_CONTROL_DC_ENABLE
	    | CPU_CONTROL_WBUF_ENABLE | CPU_CONTROL_LABT_ENABLE |
	    CPU_CONTROL_ROUNDROBIN;
	cpuctrlmask = CPU_CONTROL_MMU_ENABLE | CPU_CONTROL_32BP_ENABLE
	    | CPU_CONTROL_32BD_ENABLE | CPU_CONTROL_SYST_ENABLE
	    | CPU_CONTROL_IC_ENABLE | CPU_CONTROL_DC_ENABLE
	    | CPU_CONTROL_WBUF_ENABLE | CPU_CONTROL_ROM_ENABLE
	    | CPU_CONTROL_BEND_ENABLE | CPU_CONTROL_AFLT_ENABLE
	    | CPU_CONTROL_LABT_ENABLE | CPU_CONTROL_VECRELOC
	    | CPU_CONTROL_ROUNDROBIN;

#ifndef ARM32_DISABLE_ALIGNMENT_FAULTS
	cpuctrl |= CPU_CONTROL_AFLT_ENABLE;
#endif

	cpuctrl = parse_cpu_options(args, arm9_options, cpuctrl);

#ifdef __ARMEB__
	cpuctrl |= CPU_CONTROL_BEND_ENABLE;
#endif
	/* Relocate the vector page to high memory if requested. */
	if (vector_page == ARM_VECTORS_HIGH)
		cpuctrl |= CPU_CONTROL_VECRELOC;

	/* Clear out the cache */
	cpu_idcache_wbinv_all();

	/*
	 * Set the control register.  Unlike most of the other setup
	 * routines here this one only writes the bits covered by
	 * cpuctrlmask rather than passing an all-ones mask.
	 */
	cpu_control(cpuctrlmask, cpuctrl);
	ctrl = cpuctrl;

}
1755 #endif /* CPU_ARM9 */
1756
1757 #if defined(CPU_ARM9E) || defined(CPU_ARM10)
/*
 * ARM9E/ARM10 boot-argument overrides: separate I/D cache enables and
 * the write buffer.
 */
struct cpu_option arm10_options[] = {
	{ "cpu.cache", BIC, OR, (CPU_CONTROL_IC_ENABLE | CPU_CONTROL_DC_ENABLE) },
	{ "cpu.nocache", OR, BIC, (CPU_CONTROL_IC_ENABLE | CPU_CONTROL_DC_ENABLE) },
	{ "arm10.cache", BIC, OR, (CPU_CONTROL_IC_ENABLE | CPU_CONTROL_DC_ENABLE) },
	{ "arm10.icache", BIC, OR, CPU_CONTROL_IC_ENABLE },
	{ "arm10.dcache", BIC, OR, CPU_CONTROL_DC_ENABLE },
	{ "cpu.writebuf", BIC, OR, CPU_CONTROL_WBUF_ENABLE },
	{ "cpu.nowritebuf", OR, BIC, CPU_CONTROL_WBUF_ENABLE },
	{ "arm10.writebuf", BIC, OR, CPU_CONTROL_WBUF_ENABLE },
	{ NULL, IGN, IGN, 0 }
};
1769
/*
 * Configure the ARM9E/ARM10 control register from the default bit set
 * plus boot-argument overrides.
 */
void
arm10_setup(args)
	char *args;
{
	int cpuctrl, cpuctrlmask;

	cpuctrl = CPU_CONTROL_MMU_ENABLE | CPU_CONTROL_SYST_ENABLE
	    | CPU_CONTROL_IC_ENABLE | CPU_CONTROL_DC_ENABLE
	    | CPU_CONTROL_WBUF_ENABLE | CPU_CONTROL_BPRD_ENABLE;
	cpuctrlmask = CPU_CONTROL_MMU_ENABLE | CPU_CONTROL_SYST_ENABLE
	    | CPU_CONTROL_IC_ENABLE | CPU_CONTROL_DC_ENABLE
	    | CPU_CONTROL_WBUF_ENABLE | CPU_CONTROL_ROM_ENABLE
	    | CPU_CONTROL_BEND_ENABLE | CPU_CONTROL_AFLT_ENABLE
	    | CPU_CONTROL_BPRD_ENABLE
	    | CPU_CONTROL_ROUNDROBIN | CPU_CONTROL_CPCLK;

#ifndef ARM32_DISABLE_ALIGNMENT_FAULTS
	cpuctrl |= CPU_CONTROL_AFLT_ENABLE;
#endif

	cpuctrl = parse_cpu_options(args, arm10_options, cpuctrl);

#ifdef __ARMEB__
	cpuctrl |= CPU_CONTROL_BEND_ENABLE;
#endif

	/* Clear out the cache */
	cpu_idcache_wbinv_all();

	/* Now really make sure they are clean. */
	__asm __volatile ("mcr\tp15, 0, r0, c7, c7, 0" : : );

	/* Relocate the vector page to high memory if requested. */
	if (vector_page == ARM_VECTORS_HIGH)
		cpuctrl |= CPU_CONTROL_VECRELOC;

	/* Set the control register */
	ctrl = cpuctrl;
	cpu_control(0xffffffff, cpuctrl);

	/* And again. */
	cpu_idcache_wbinv_all();
}
1812 #endif /* CPU_ARM9E || CPU_ARM10 */
1813
1814 #ifdef CPU_ARM11
/* ARM11 boot-argument overrides: separate I/D cache enables only. */
struct cpu_option arm11_options[] = {
	{ "cpu.cache", BIC, OR, (CPU_CONTROL_IC_ENABLE | CPU_CONTROL_DC_ENABLE) },
	{ "cpu.nocache", OR, BIC, (CPU_CONTROL_IC_ENABLE | CPU_CONTROL_DC_ENABLE) },
	{ "arm11.cache", BIC, OR, (CPU_CONTROL_IC_ENABLE | CPU_CONTROL_DC_ENABLE) },
	{ "arm11.icache", BIC, OR, CPU_CONTROL_IC_ENABLE },
	{ "arm11.dcache", BIC, OR, CPU_CONTROL_DC_ENABLE },
	{ NULL, IGN, IGN, 0 }
};
1823
1824 void
1825 arm11_setup(args)
1826 char *args;
1827 {
1828 int cpuctrl, cpuctrlmask;
1829
1830 cpuctrl = CPU_CONTROL_MMU_ENABLE | CPU_CONTROL_SYST_ENABLE
1831 | CPU_CONTROL_IC_ENABLE | CPU_CONTROL_DC_ENABLE
1832 /* | CPU_CONTROL_BPRD_ENABLE */;
1833 cpuctrlmask = CPU_CONTROL_MMU_ENABLE | CPU_CONTROL_SYST_ENABLE
1834 | CPU_CONTROL_IC_ENABLE | CPU_CONTROL_DC_ENABLE
1835 | CPU_CONTROL_ROM_ENABLE | CPU_CONTROL_BPRD_ENABLE
1836 | CPU_CONTROL_BEND_ENABLE | CPU_CONTROL_AFLT_ENABLE
1837 | CPU_CONTROL_ROUNDROBIN | CPU_CONTROL_CPCLK;
1838
1839 #ifndef ARM32_DISABLE_ALIGNMENT_FAULTS
1840 cpuctrl |= CPU_CONTROL_AFLT_ENABLE;
1841 #endif
1842
1843 cpuctrl = parse_cpu_options(args, arm11_options, cpuctrl);
1844
1845 #ifdef __ARMEB__
1846 cpuctrl |= CPU_CONTROL_BEND_ENABLE;
1847 #endif
1848
1849 /* Clear out the cache */
1850 cpu_idcache_wbinv_all();
1851
1852 /* Now really make sure they are clean. */
1853 __asm __volatile ("mcr\tp15, 0, r0, c7, c7, 0" : : );
1854
1855 /* Set the control register */
1856 curcpu()->ci_ctrl = cpuctrl;
1857 cpu_control(0xffffffff, cpuctrl);
1858
1859 /* And again. */
1860 cpu_idcache_wbinv_all();
1861 }
1862 #endif /* CPU_ARM11 */
1863
1864 #ifdef CPU_SA110
/*
 * SA-110 boot-argument overrides: separate I/D cache enables and the
 * write buffer.
 */
struct cpu_option sa110_options[] = {
#ifdef COMPAT_12
	{ "nocache", IGN, BIC, (CPU_CONTROL_IC_ENABLE | CPU_CONTROL_DC_ENABLE) },
	{ "nowritebuf", IGN, BIC, CPU_CONTROL_WBUF_ENABLE },
#endif	/* COMPAT_12 */
	{ "cpu.cache", BIC, OR, (CPU_CONTROL_IC_ENABLE | CPU_CONTROL_DC_ENABLE) },
	{ "cpu.nocache", OR, BIC, (CPU_CONTROL_IC_ENABLE | CPU_CONTROL_DC_ENABLE) },
	{ "sa110.cache", BIC, OR, (CPU_CONTROL_IC_ENABLE | CPU_CONTROL_DC_ENABLE) },
	{ "sa110.icache", BIC, OR, CPU_CONTROL_IC_ENABLE },
	{ "sa110.dcache", BIC, OR, CPU_CONTROL_DC_ENABLE },
	{ "cpu.writebuf", BIC, OR, CPU_CONTROL_WBUF_ENABLE },
	{ "cpu.nowritebuf", OR, BIC, CPU_CONTROL_WBUF_ENABLE },
	{ "sa110.writebuf", BIC, OR, CPU_CONTROL_WBUF_ENABLE },
	{ NULL, IGN, IGN, 0 }
};
1880
/*
 * Configure the SA-110 control register from the default bit set plus
 * boot-argument overrides, then enable clock switching.
 */
void
sa110_setup(args)
	char *args;
{
	int cpuctrl, cpuctrlmask;

	cpuctrl = CPU_CONTROL_MMU_ENABLE | CPU_CONTROL_32BP_ENABLE
		 | CPU_CONTROL_32BD_ENABLE | CPU_CONTROL_SYST_ENABLE
		 | CPU_CONTROL_IC_ENABLE | CPU_CONTROL_DC_ENABLE
		 | CPU_CONTROL_WBUF_ENABLE;
	cpuctrlmask = CPU_CONTROL_MMU_ENABLE | CPU_CONTROL_32BP_ENABLE
		 | CPU_CONTROL_32BD_ENABLE | CPU_CONTROL_SYST_ENABLE
		 | CPU_CONTROL_IC_ENABLE | CPU_CONTROL_DC_ENABLE
		 | CPU_CONTROL_WBUF_ENABLE | CPU_CONTROL_ROM_ENABLE
		 | CPU_CONTROL_BEND_ENABLE | CPU_CONTROL_AFLT_ENABLE
		 | CPU_CONTROL_LABT_ENABLE | CPU_CONTROL_BPRD_ENABLE
		 | CPU_CONTROL_CPCLK;

#ifndef ARM32_DISABLE_ALIGNMENT_FAULTS
	cpuctrl |= CPU_CONTROL_AFLT_ENABLE;
#endif

	cpuctrl = parse_cpu_options(args, sa110_options, cpuctrl);

#ifdef __ARMEB__
	cpuctrl |= CPU_CONTROL_BEND_ENABLE;
#endif

	/* Clear out the cache */
	cpu_idcache_wbinv_all();

	/* Set the control register */
	ctrl = cpuctrl;
	/* cpu_control(cpuctrlmask, cpuctrl);*/
	cpu_control(0xffffffff, cpuctrl);

	/*
	 * enable clockswitching, note that this doesn't read or write to r0,
	 * r0 is just to make it valid asm
	 */
	__asm ("mcr 15, 0, r0, c15, c1, 2");
}
1923 #endif /* CPU_SA110 */
1924
1925 #if defined(CPU_SA1100) || defined(CPU_SA1110)
/*
 * SA-1100/SA-1110 boot-argument overrides: separate I/D cache enables
 * and the write buffer.
 */
struct cpu_option sa11x0_options[] = {
#ifdef COMPAT_12
	{ "nocache", IGN, BIC, (CPU_CONTROL_IC_ENABLE | CPU_CONTROL_DC_ENABLE) },
	{ "nowritebuf", IGN, BIC, CPU_CONTROL_WBUF_ENABLE },
#endif	/* COMPAT_12 */
	{ "cpu.cache", BIC, OR, (CPU_CONTROL_IC_ENABLE | CPU_CONTROL_DC_ENABLE) },
	{ "cpu.nocache", OR, BIC, (CPU_CONTROL_IC_ENABLE | CPU_CONTROL_DC_ENABLE) },
	{ "sa11x0.cache", BIC, OR, (CPU_CONTROL_IC_ENABLE | CPU_CONTROL_DC_ENABLE) },
	{ "sa11x0.icache", BIC, OR, CPU_CONTROL_IC_ENABLE },
	{ "sa11x0.dcache", BIC, OR, CPU_CONTROL_DC_ENABLE },
	{ "cpu.writebuf", BIC, OR, CPU_CONTROL_WBUF_ENABLE },
	{ "cpu.nowritebuf", OR, BIC, CPU_CONTROL_WBUF_ENABLE },
	{ "sa11x0.writebuf", BIC, OR, CPU_CONTROL_WBUF_ENABLE },
	{ NULL, IGN, IGN, 0 }
};
1941
/*
 * Configure the SA-1100/SA-1110 control register from the default bit
 * set plus boot-argument overrides.
 */
void
sa11x0_setup(args)
	char *args;
{
	int cpuctrl, cpuctrlmask;

	cpuctrl = CPU_CONTROL_MMU_ENABLE | CPU_CONTROL_32BP_ENABLE
		 | CPU_CONTROL_32BD_ENABLE | CPU_CONTROL_SYST_ENABLE
		 | CPU_CONTROL_IC_ENABLE | CPU_CONTROL_DC_ENABLE
		 | CPU_CONTROL_WBUF_ENABLE | CPU_CONTROL_LABT_ENABLE;
	cpuctrlmask = CPU_CONTROL_MMU_ENABLE | CPU_CONTROL_32BP_ENABLE
		 | CPU_CONTROL_32BD_ENABLE | CPU_CONTROL_SYST_ENABLE
		 | CPU_CONTROL_IC_ENABLE | CPU_CONTROL_DC_ENABLE
		 | CPU_CONTROL_WBUF_ENABLE | CPU_CONTROL_ROM_ENABLE
		 | CPU_CONTROL_BEND_ENABLE | CPU_CONTROL_AFLT_ENABLE
		 | CPU_CONTROL_LABT_ENABLE | CPU_CONTROL_BPRD_ENABLE
		 | CPU_CONTROL_CPCLK | CPU_CONTROL_VECRELOC;

#ifndef ARM32_DISABLE_ALIGNMENT_FAULTS
	cpuctrl |= CPU_CONTROL_AFLT_ENABLE;
#endif


	cpuctrl = parse_cpu_options(args, sa11x0_options, cpuctrl);

#ifdef __ARMEB__
	cpuctrl |= CPU_CONTROL_BEND_ENABLE;
#endif

	/* Relocate the vector page to high memory if requested. */
	if (vector_page == ARM_VECTORS_HIGH)
		cpuctrl |= CPU_CONTROL_VECRELOC;
	/* Clear out the cache */
	cpu_idcache_wbinv_all();
	/* Set the control register */
	ctrl = cpuctrl;
	cpu_control(0xffffffff, cpuctrl);
}
1979 #endif /* CPU_SA1100 || CPU_SA1110 */
1980
1981 #if defined(CPU_IXP12X0)
/*
 * IXP12x0 boot-argument overrides: separate I/D cache enables and the
 * write buffer.
 */
struct cpu_option ixp12x0_options[] = {
	{ "cpu.cache", BIC, OR, (CPU_CONTROL_IC_ENABLE | CPU_CONTROL_DC_ENABLE) },
	{ "cpu.nocache", OR, BIC, (CPU_CONTROL_IC_ENABLE | CPU_CONTROL_DC_ENABLE) },
	{ "ixp12x0.cache", BIC, OR, (CPU_CONTROL_IC_ENABLE | CPU_CONTROL_DC_ENABLE) },
	{ "ixp12x0.icache", BIC, OR, CPU_CONTROL_IC_ENABLE },
	{ "ixp12x0.dcache", BIC, OR, CPU_CONTROL_DC_ENABLE },
	{ "cpu.writebuf", BIC, OR, CPU_CONTROL_WBUF_ENABLE },
	{ "cpu.nowritebuf", OR, BIC, CPU_CONTROL_WBUF_ENABLE },
	{ "ixp12x0.writebuf", BIC, OR, CPU_CONTROL_WBUF_ENABLE },
	{ NULL, IGN, IGN, 0 }
};
1993
/*
 * Configure the IXP12x0 control register from the default bit set plus
 * boot-argument overrides.
 */
void
ixp12x0_setup(args)
	char *args;
{
	int cpuctrl, cpuctrlmask;


	cpuctrl = CPU_CONTROL_MMU_ENABLE | CPU_CONTROL_DC_ENABLE
		 | CPU_CONTROL_WBUF_ENABLE | CPU_CONTROL_SYST_ENABLE
		 | CPU_CONTROL_IC_ENABLE;

	cpuctrlmask = CPU_CONTROL_MMU_ENABLE | CPU_CONTROL_AFLT_ENABLE
		 | CPU_CONTROL_DC_ENABLE | CPU_CONTROL_WBUF_ENABLE
		 | CPU_CONTROL_BEND_ENABLE | CPU_CONTROL_SYST_ENABLE
		 | CPU_CONTROL_ROM_ENABLE | CPU_CONTROL_IC_ENABLE
		 | CPU_CONTROL_VECRELOC;

#ifndef ARM32_DISABLE_ALIGNMENT_FAULTS
	cpuctrl |= CPU_CONTROL_AFLT_ENABLE;
#endif

	cpuctrl = parse_cpu_options(args, ixp12x0_options, cpuctrl);

#ifdef __ARMEB__
	cpuctrl |= CPU_CONTROL_BEND_ENABLE;
#endif

	/* Relocate the vector page to high memory if requested. */
	if (vector_page == ARM_VECTORS_HIGH)
		cpuctrl |= CPU_CONTROL_VECRELOC;

	/* Clear out the cache */
	cpu_idcache_wbinv_all();

	/*
	 * Set the control register.  Note: only the bits covered by
	 * cpuctrlmask are written here; the all-ones variant remains
	 * commented out below.
	 */
	ctrl = cpuctrl;
	/* cpu_control(0xffffffff, cpuctrl); */
	cpu_control(cpuctrlmask, cpuctrl);
}
2032 #endif /* CPU_IXP12X0 */
2033
2034 #if defined(CPU_XSCALE_80200) || defined(CPU_XSCALE_80321) || \
2035 defined(CPU_XSCALE_PXA2X0) || defined(CPU_XSCALE_IXP425) || \
2036 defined(CPU_XSCALE_80219) || defined(CPU_XSCALE_81342)
/*
 * XScale boot-argument overrides: branch prediction and separate I/D
 * cache enables.
 */
struct cpu_option xscale_options[] = {
#ifdef COMPAT_12
	{ "branchpredict", BIC, OR, CPU_CONTROL_BPRD_ENABLE },
	{ "nocache", IGN, BIC, (CPU_CONTROL_IC_ENABLE | CPU_CONTROL_DC_ENABLE) },
#endif	/* COMPAT_12 */
	{ "cpu.branchpredict", BIC, OR, CPU_CONTROL_BPRD_ENABLE },
	{ "cpu.cache", BIC, OR, (CPU_CONTROL_IC_ENABLE | CPU_CONTROL_DC_ENABLE) },
	{ "cpu.nocache", OR, BIC, (CPU_CONTROL_IC_ENABLE | CPU_CONTROL_DC_ENABLE) },
	{ "xscale.branchpredict", BIC, OR, CPU_CONTROL_BPRD_ENABLE },
	{ "xscale.cache", BIC, OR, (CPU_CONTROL_IC_ENABLE | CPU_CONTROL_DC_ENABLE) },
	{ "xscale.icache", BIC, OR, CPU_CONTROL_IC_ENABLE },
	{ "xscale.dcache", BIC, OR, CPU_CONTROL_DC_ENABLE },
	{ NULL, IGN, IGN, 0 }
};
2051
2052 void
2053 xscale_setup(args)
2054 char *args;
2055 {
2056 uint32_t auxctl;
2057 int cpuctrl, cpuctrlmask;
2058
2059 /*
2060 * The XScale Write Buffer is always enabled. Our option
2061 * is to enable/disable coalescing. Note that bits 6:3
2062 * must always be enabled.
2063 */
2064
2065 cpuctrl = CPU_CONTROL_MMU_ENABLE | CPU_CONTROL_32BP_ENABLE
2066 | CPU_CONTROL_32BD_ENABLE | CPU_CONTROL_SYST_ENABLE
2067 | CPU_CONTROL_IC_ENABLE | CPU_CONTROL_DC_ENABLE
2068 | CPU_CONTROL_WBUF_ENABLE | CPU_CONTROL_LABT_ENABLE
2069 | CPU_CONTROL_BPRD_ENABLE;
2070 cpuctrlmask = CPU_CONTROL_MMU_ENABLE | CPU_CONTROL_32BP_ENABLE
2071 | CPU_CONTROL_32BD_ENABLE | CPU_CONTROL_SYST_ENABLE
2072 | CPU_CONTROL_IC_ENABLE | CPU_CONTROL_DC_ENABLE
2073 | CPU_CONTROL_WBUF_ENABLE | CPU_CONTROL_ROM_ENABLE
2074 | CPU_CONTROL_BEND_ENABLE | CPU_CONTROL_AFLT_ENABLE
2075 | CPU_CONTROL_LABT_ENABLE | CPU_CONTROL_BPRD_ENABLE
2076 | CPU_CONTROL_CPCLK | CPU_CONTROL_VECRELOC | \
2077 CPU_CONTROL_L2_ENABLE;
2078
2079 #ifndef ARM32_DISABLE_ALIGNMENT_FAULTS
2080 cpuctrl |= CPU_CONTROL_AFLT_ENABLE;
2081 #endif
2082
2083 cpuctrl = parse_cpu_options(args, xscale_options, cpuctrl);
2084
2085 #ifdef __ARMEB__
2086 cpuctrl |= CPU_CONTROL_BEND_ENABLE;
2087 #endif
2088
2089 if (vector_page == ARM_VECTORS_HIGH)
2090 cpuctrl |= CPU_CONTROL_VECRELOC;
2091 #ifdef CPU_XSCALE_CORE3
2092 cpuctrl |= CPU_CONTROL_L2_ENABLE;
2093 #endif
2094
2095 /* Clear out the cache */
2096 cpu_idcache_wbinv_all();
2097
2098 /*
2099 * Set the control register. Note that bits 6:3 must always
2100 * be set to 1.
2101 */
2102 ctrl = cpuctrl;
2103 /* cpu_control(cpuctrlmask, cpuctrl);*/
2104 cpu_control(0xffffffff, cpuctrl);
2105
2106 /* Make sure write coalescing is turned on */
2107 __asm __volatile("mrc p15, 0, %0, c1, c0, 1"
2108 : "=r" (auxctl));
2109 #ifdef XSCALE_NO_COALESCE_WRITES
2110 auxctl |= XSCALE_AUXCTL_K;
2111 #else
2112 auxctl &= ~XSCALE_AUXCTL_K;
2113 #endif
2114 #ifdef CPU_XSCALE_CORE3
2115 auxctl |= XSCALE_AUXCTL_LLR;
2116 auxctl |= XSCALE_AUXCTL_MD_MASK;
2117 #endif
2118 __asm __volatile("mcr p15, 0, %0, c1, c0, 1"
2119 : : "r" (auxctl));
2120 }
#endif	/* CPU_XSCALE_80200 || CPU_XSCALE_80321 || CPU_XSCALE_PXA2X0 ||
	   CPU_XSCALE_IXP425 || CPU_XSCALE_80219 || CPU_XSCALE_81342 */
Cache object: 4a3d4b65fb77dfb4dde3cc2ccfa2353d
|