FreeBSD/Linux Kernel Cross Reference
sys/arm/arm/cpufunc.c
1 /* $NetBSD: cpufunc.c,v 1.65 2003/11/05 12:53:15 scw Exp $ */
2
3 /*-
4 * arm7tdmi support code Copyright (c) 2001 John Fremlin
5 * arm8 support code Copyright (c) 1997 ARM Limited
6 * arm8 support code Copyright (c) 1997 Causality Limited
7 * arm9 support code Copyright (C) 2001 ARM Ltd
8 * Copyright (c) 1997 Mark Brinicombe.
9 * Copyright (c) 1997 Causality Limited
10 * All rights reserved.
11 *
12 * Redistribution and use in source and binary forms, with or without
13 * modification, are permitted provided that the following conditions
14 * are met:
15 * 1. Redistributions of source code must retain the above copyright
16 * notice, this list of conditions and the following disclaimer.
17 * 2. Redistributions in binary form must reproduce the above copyright
18 * notice, this list of conditions and the following disclaimer in the
19 * documentation and/or other materials provided with the distribution.
20 * 3. All advertising materials mentioning features or use of this software
21 * must display the following acknowledgement:
22 * This product includes software developed by Causality Limited.
23 * 4. The name of Causality Limited may not be used to endorse or promote
24 * products derived from this software without specific prior written
25 * permission.
26 *
27 * THIS SOFTWARE IS PROVIDED BY CAUSALITY LIMITED ``AS IS'' AND ANY EXPRESS
28 * OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
29 * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
30 * DISCLAIMED. IN NO EVENT SHALL CAUSALITY LIMITED BE LIABLE FOR ANY DIRECT,
31 * INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
32 * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
33 * SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
34 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
35 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
36 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
37 * SUCH DAMAGE.
38 *
39 * RiscBSD kernel project
40 *
41 * cpufuncs.c
42 *
43 * C functions for supporting CPU / MMU / TLB specific operations.
44 *
45 * Created : 30/01/97
46 */
47 #include <sys/cdefs.h>
48 __FBSDID("$FreeBSD: releng/6.0/sys/arm/arm/cpufunc.c 146619 2005-05-25 13:46:32Z cognet $");
49
52 #include <sys/types.h>
53 #include <sys/param.h>
54 #include <sys/systm.h>
55 #include <sys/lock.h>
56 #include <sys/mutex.h>
57 #include <sys/bus.h>
58 #include <machine/bus.h>
59 #include <machine/cpu.h>
60 #include <machine/disassem.h>
61
62 #include <vm/vm.h>
63 #include <vm/pmap.h>
64
65 #include <machine/cpuconf.h>
66 #include <machine/cpufunc.h>
67 #include <machine/bootconfig.h>
68
69 #ifdef CPU_XSCALE_80200
70 #include <arm/xscale/i80200/i80200reg.h>
71 #include <arm/xscale/i80200/i80200var.h>
72 #endif
73
74 #ifdef CPU_XSCALE_80321
75 #include <arm/xscale/i80321/i80321reg.h>
76 #include <arm/xscale/i80321/i80321var.h>
77 #endif
78
79 #ifdef CPU_XSCALE_IXP425
80 #include <arm/xscale/ixp425/ixp425reg.h>
81 #include <arm/xscale/ixp425/ixp425var.h>
82 #endif
83
84 #if defined(CPU_XSCALE_80200) || defined(CPU_XSCALE_80321)
85 #include <arm/xscale/xscalereg.h>
86 #endif
87
88 #if defined(PERFCTRS)
89 struct arm_pmc_funcs *arm_pmc;
90 #endif
91
92 /* PRIMARY CACHE VARIABLES */
93 int arm_picache_size;
94 int arm_picache_line_size;
95 int arm_picache_ways;
96
97 int arm_pdcache_size; /* and unified */
98 int arm_pdcache_line_size;
99 int arm_pdcache_ways;
100
101 int arm_pcache_type;
102 int arm_pcache_unified;
103
104 int arm_dcache_align;
105 int arm_dcache_align_mask;
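/*
 * arm_dcache_align is the D-cache line size in bytes and
 * arm_dcache_align_mask is (arm_dcache_align - 1), so range cache
 * operations can round an address down to a line boundary with,
 * for example:
 *
 *	va &= ~arm_dcache_align_mask;
 */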
106
107 /* 1 == use cpu_sleep(), 0 == don't */
108 int cpu_do_powersave;
109 int ctrl;
110
111 #ifdef CPU_ARM7TDMI
112 struct cpu_functions arm7tdmi_cpufuncs = {
113 /* CPU functions */
114
115 cpufunc_id, /* id */
116 cpufunc_nullop, /* cpwait */
117
118 /* MMU functions */
119
120 cpufunc_control, /* control */
121 cpufunc_domains, /* domain */
122 arm7tdmi_setttb, /* setttb */
123 cpufunc_faultstatus, /* faultstatus */
124 cpufunc_faultaddress, /* faultaddress */
125
126 /* TLB functions */
127
128 arm7tdmi_tlb_flushID, /* tlb_flushID */
129 arm7tdmi_tlb_flushID_SE, /* tlb_flushID_SE */
130 arm7tdmi_tlb_flushID, /* tlb_flushI */
131 arm7tdmi_tlb_flushID_SE, /* tlb_flushI_SE */
132 arm7tdmi_tlb_flushID, /* tlb_flushD */
133 arm7tdmi_tlb_flushID_SE, /* tlb_flushD_SE */
134
135 /* Cache operations */
136
137 cpufunc_nullop, /* icache_sync_all */
138 (void *)cpufunc_nullop, /* icache_sync_range */
139
140 arm7tdmi_cache_flushID, /* dcache_wbinv_all */
141 (void *)arm7tdmi_cache_flushID, /* dcache_wbinv_range */
142 (void *)arm7tdmi_cache_flushID, /* dcache_inv_range */
143 (void *)cpufunc_nullop, /* dcache_wb_range */
144
145 arm7tdmi_cache_flushID, /* idcache_wbinv_all */
146 (void *)arm7tdmi_cache_flushID, /* idcache_wbinv_range */
147
148 /* Other functions */
149
150 cpufunc_nullop, /* flush_prefetchbuf */
151 cpufunc_nullop, /* drain_writebuf */
152 cpufunc_nullop, /* flush_brnchtgt_C */
153 (void *)cpufunc_nullop, /* flush_brnchtgt_E */
154
155 (void *)cpufunc_nullop, /* sleep */
156
157 /* Soft functions */
158
159 late_abort_fixup, /* dataabt_fixup */
160 cpufunc_null_fixup, /* prefetchabt_fixup */
161
162 arm7tdmi_context_switch, /* context_switch */
163
164 arm7tdmi_setup /* cpu setup */
165
166 };
167 #endif /* CPU_ARM7TDMI */
168
169 #ifdef CPU_ARM8
170 struct cpu_functions arm8_cpufuncs = {
171 /* CPU functions */
172
173 cpufunc_id, /* id */
174 cpufunc_nullop, /* cpwait */
175
176 /* MMU functions */
177
178 cpufunc_control, /* control */
179 cpufunc_domains, /* domain */
180 arm8_setttb, /* setttb */
181 cpufunc_faultstatus, /* faultstatus */
182 cpufunc_faultaddress, /* faultaddress */
183
184 /* TLB functions */
185
186 arm8_tlb_flushID, /* tlb_flushID */
187 arm8_tlb_flushID_SE, /* tlb_flushID_SE */
188 arm8_tlb_flushID, /* tlb_flushI */
189 arm8_tlb_flushID_SE, /* tlb_flushI_SE */
190 arm8_tlb_flushID, /* tlb_flushD */
191 arm8_tlb_flushID_SE, /* tlb_flushD_SE */
192
193 /* Cache operations */
194
195 cpufunc_nullop, /* icache_sync_all */
196 (void *)cpufunc_nullop, /* icache_sync_range */
197
198 arm8_cache_purgeID, /* dcache_wbinv_all */
199 (void *)arm8_cache_purgeID, /* dcache_wbinv_range */
200 /*XXX*/ (void *)arm8_cache_purgeID, /* dcache_inv_range */
201 (void *)arm8_cache_cleanID, /* dcache_wb_range */
202
203 arm8_cache_purgeID, /* idcache_wbinv_all */
204 (void *)arm8_cache_purgeID, /* idcache_wbinv_range */
205
206 /* Other functions */
207
208 cpufunc_nullop, /* flush_prefetchbuf */
209 cpufunc_nullop, /* drain_writebuf */
210 cpufunc_nullop, /* flush_brnchtgt_C */
211 (void *)cpufunc_nullop, /* flush_brnchtgt_E */
212
213 (void *)cpufunc_nullop, /* sleep */
214
215 /* Soft functions */
216
217 cpufunc_null_fixup, /* dataabt_fixup */
218 cpufunc_null_fixup, /* prefetchabt_fixup */
219
220 arm8_context_switch, /* context_switch */
221
222 arm8_setup /* cpu setup */
223 };
224 #endif /* CPU_ARM8 */
225
226 #ifdef CPU_ARM9
227 struct cpu_functions arm9_cpufuncs = {
228 /* CPU functions */
229
230 cpufunc_id, /* id */
231 cpufunc_nullop, /* cpwait */
232
233 /* MMU functions */
234
235 cpufunc_control, /* control */
236 	cpufunc_domains,		/* domain */
237 	arm9_setttb,			/* setttb */
238 	cpufunc_faultstatus,		/* faultstatus */
239 	cpufunc_faultaddress,		/* faultaddress */
240
241 /* TLB functions */
242
243 armv4_tlb_flushID, /* tlb_flushID */
244 arm9_tlb_flushID_SE, /* tlb_flushID_SE */
245 armv4_tlb_flushI, /* tlb_flushI */
246 (void *)armv4_tlb_flushI, /* tlb_flushI_SE */
247 armv4_tlb_flushD, /* tlb_flushD */
248 armv4_tlb_flushD_SE, /* tlb_flushD_SE */
249
250 /* Cache operations */
251
252 arm9_icache_sync_all, /* icache_sync_all */
253 arm9_icache_sync_range, /* icache_sync_range */
254
255 arm9_dcache_wbinv_all, /* dcache_wbinv_all */
256 arm9_dcache_wbinv_range, /* dcache_wbinv_range */
257 /*XXX*/ arm9_dcache_wbinv_range, /* dcache_inv_range */
258 arm9_dcache_wb_range, /* dcache_wb_range */
259
260 arm9_idcache_wbinv_all, /* idcache_wbinv_all */
261 arm9_idcache_wbinv_range, /* idcache_wbinv_range */
262
263 /* Other functions */
264
265 cpufunc_nullop, /* flush_prefetchbuf */
266 armv4_drain_writebuf, /* drain_writebuf */
267 cpufunc_nullop, /* flush_brnchtgt_C */
268 (void *)cpufunc_nullop, /* flush_brnchtgt_E */
269
270 (void *)cpufunc_nullop, /* sleep */
271
272 /* Soft functions */
273
274 cpufunc_null_fixup, /* dataabt_fixup */
275 cpufunc_null_fixup, /* prefetchabt_fixup */
276
277 arm9_context_switch, /* context_switch */
278
279 arm9_setup /* cpu setup */
280
281 };
282 #endif /* CPU_ARM9 */
283
284 #ifdef CPU_ARM10
285 struct cpu_functions arm10_cpufuncs = {
286 /* CPU functions */
287
288 cpufunc_id, /* id */
289 cpufunc_nullop, /* cpwait */
290
291 /* MMU functions */
292
293 cpufunc_control, /* control */
294 	cpufunc_domains,		/* domain */
295 	arm10_setttb,			/* setttb */
296 	cpufunc_faultstatus,		/* faultstatus */
297 	cpufunc_faultaddress,		/* faultaddress */
298
299 /* TLB functions */
300
301 armv4_tlb_flushID, /* tlb_flushID */
302 arm10_tlb_flushID_SE, /* tlb_flushID_SE */
303 armv4_tlb_flushI, /* tlb_flushI */
304 arm10_tlb_flushI_SE, /* tlb_flushI_SE */
305 armv4_tlb_flushD, /* tlb_flushD */
306 armv4_tlb_flushD_SE, /* tlb_flushD_SE */
307
308 /* Cache operations */
309
310 arm10_icache_sync_all, /* icache_sync_all */
311 arm10_icache_sync_range, /* icache_sync_range */
312
313 arm10_dcache_wbinv_all, /* dcache_wbinv_all */
314 arm10_dcache_wbinv_range, /* dcache_wbinv_range */
315 arm10_dcache_inv_range, /* dcache_inv_range */
316 arm10_dcache_wb_range, /* dcache_wb_range */
317
318 arm10_idcache_wbinv_all, /* idcache_wbinv_all */
319 arm10_idcache_wbinv_range, /* idcache_wbinv_range */
320
321 /* Other functions */
322
323 cpufunc_nullop, /* flush_prefetchbuf */
324 armv4_drain_writebuf, /* drain_writebuf */
325 cpufunc_nullop, /* flush_brnchtgt_C */
326 (void *)cpufunc_nullop, /* flush_brnchtgt_E */
327
328 (void *)cpufunc_nullop, /* sleep */
329
330 /* Soft functions */
331
332 cpufunc_null_fixup, /* dataabt_fixup */
333 cpufunc_null_fixup, /* prefetchabt_fixup */
334
335 arm10_context_switch, /* context_switch */
336
337 arm10_setup /* cpu setup */
338
339 };
340 #endif /* CPU_ARM10 */
341
342 #ifdef CPU_SA110
343 struct cpu_functions sa110_cpufuncs = {
344 /* CPU functions */
345
346 cpufunc_id, /* id */
347 cpufunc_nullop, /* cpwait */
348
349 /* MMU functions */
350
351 cpufunc_control, /* control */
352 cpufunc_domains, /* domain */
353 sa1_setttb, /* setttb */
354 cpufunc_faultstatus, /* faultstatus */
355 cpufunc_faultaddress, /* faultaddress */
356
357 /* TLB functions */
358
359 armv4_tlb_flushID, /* tlb_flushID */
360 sa1_tlb_flushID_SE, /* tlb_flushID_SE */
361 armv4_tlb_flushI, /* tlb_flushI */
362 (void *)armv4_tlb_flushI, /* tlb_flushI_SE */
363 armv4_tlb_flushD, /* tlb_flushD */
364 armv4_tlb_flushD_SE, /* tlb_flushD_SE */
365
366 /* Cache operations */
367
368 sa1_cache_syncI, /* icache_sync_all */
369 sa1_cache_syncI_rng, /* icache_sync_range */
370
371 sa1_cache_purgeD, /* dcache_wbinv_all */
372 sa1_cache_purgeD_rng, /* dcache_wbinv_range */
373 /*XXX*/ sa1_cache_purgeD_rng, /* dcache_inv_range */
374 sa1_cache_cleanD_rng, /* dcache_wb_range */
375
376 sa1_cache_purgeID, /* idcache_wbinv_all */
377 sa1_cache_purgeID_rng, /* idcache_wbinv_range */
378
379 /* Other functions */
380
381 cpufunc_nullop, /* flush_prefetchbuf */
382 armv4_drain_writebuf, /* drain_writebuf */
383 cpufunc_nullop, /* flush_brnchtgt_C */
384 (void *)cpufunc_nullop, /* flush_brnchtgt_E */
385
386 (void *)cpufunc_nullop, /* sleep */
387
388 /* Soft functions */
389
390 cpufunc_null_fixup, /* dataabt_fixup */
391 cpufunc_null_fixup, /* prefetchabt_fixup */
392
393 sa110_context_switch, /* context_switch */
394
395 sa110_setup /* cpu setup */
396 };
397 #endif /* CPU_SA110 */
398
399 #if defined(CPU_SA1100) || defined(CPU_SA1110)
400 struct cpu_functions sa11x0_cpufuncs = {
401 /* CPU functions */
402
403 cpufunc_id, /* id */
404 cpufunc_nullop, /* cpwait */
405
406 /* MMU functions */
407
408 cpufunc_control, /* control */
409 cpufunc_domains, /* domain */
410 sa1_setttb, /* setttb */
411 cpufunc_faultstatus, /* faultstatus */
412 cpufunc_faultaddress, /* faultaddress */
413
414 /* TLB functions */
415
416 armv4_tlb_flushID, /* tlb_flushID */
417 sa1_tlb_flushID_SE, /* tlb_flushID_SE */
418 armv4_tlb_flushI, /* tlb_flushI */
419 (void *)armv4_tlb_flushI, /* tlb_flushI_SE */
420 armv4_tlb_flushD, /* tlb_flushD */
421 armv4_tlb_flushD_SE, /* tlb_flushD_SE */
422
423 /* Cache operations */
424
425 sa1_cache_syncI, /* icache_sync_all */
426 sa1_cache_syncI_rng, /* icache_sync_range */
427
428 sa1_cache_purgeD, /* dcache_wbinv_all */
429 sa1_cache_purgeD_rng, /* dcache_wbinv_range */
430 /*XXX*/ sa1_cache_purgeD_rng, /* dcache_inv_range */
431 sa1_cache_cleanD_rng, /* dcache_wb_range */
432
433 sa1_cache_purgeID, /* idcache_wbinv_all */
434 sa1_cache_purgeID_rng, /* idcache_wbinv_range */
435
436 /* Other functions */
437
438 sa11x0_drain_readbuf, /* flush_prefetchbuf */
439 armv4_drain_writebuf, /* drain_writebuf */
440 cpufunc_nullop, /* flush_brnchtgt_C */
441 (void *)cpufunc_nullop, /* flush_brnchtgt_E */
442
443 sa11x0_cpu_sleep, /* sleep */
444
445 /* Soft functions */
446
447 cpufunc_null_fixup, /* dataabt_fixup */
448 cpufunc_null_fixup, /* prefetchabt_fixup */
449
450 sa11x0_context_switch, /* context_switch */
451
452 sa11x0_setup /* cpu setup */
453 };
454 #endif /* CPU_SA1100 || CPU_SA1110 */
455
456 #ifdef CPU_IXP12X0
457 struct cpu_functions ixp12x0_cpufuncs = {
458 /* CPU functions */
459
460 cpufunc_id, /* id */
461 cpufunc_nullop, /* cpwait */
462
463 /* MMU functions */
464
465 cpufunc_control, /* control */
466 cpufunc_domains, /* domain */
467 sa1_setttb, /* setttb */
468 cpufunc_faultstatus, /* faultstatus */
469 cpufunc_faultaddress, /* faultaddress */
470
471 /* TLB functions */
472
473 armv4_tlb_flushID, /* tlb_flushID */
474 sa1_tlb_flushID_SE, /* tlb_flushID_SE */
475 armv4_tlb_flushI, /* tlb_flushI */
476 (void *)armv4_tlb_flushI, /* tlb_flushI_SE */
477 armv4_tlb_flushD, /* tlb_flushD */
478 armv4_tlb_flushD_SE, /* tlb_flushD_SE */
479
480 /* Cache operations */
481
482 sa1_cache_syncI, /* icache_sync_all */
483 sa1_cache_syncI_rng, /* icache_sync_range */
484
485 sa1_cache_purgeD, /* dcache_wbinv_all */
486 sa1_cache_purgeD_rng, /* dcache_wbinv_range */
487 /*XXX*/ sa1_cache_purgeD_rng, /* dcache_inv_range */
488 sa1_cache_cleanD_rng, /* dcache_wb_range */
489
490 sa1_cache_purgeID, /* idcache_wbinv_all */
491 sa1_cache_purgeID_rng, /* idcache_wbinv_range */
492
493 /* Other functions */
494
495 ixp12x0_drain_readbuf, /* flush_prefetchbuf */
496 armv4_drain_writebuf, /* drain_writebuf */
497 cpufunc_nullop, /* flush_brnchtgt_C */
498 (void *)cpufunc_nullop, /* flush_brnchtgt_E */
499
500 (void *)cpufunc_nullop, /* sleep */
501
502 /* Soft functions */
503
504 cpufunc_null_fixup, /* dataabt_fixup */
505 cpufunc_null_fixup, /* prefetchabt_fixup */
506
507 ixp12x0_context_switch, /* context_switch */
508
509 ixp12x0_setup /* cpu setup */
510 };
511 #endif /* CPU_IXP12X0 */
512
513 #if defined(CPU_XSCALE_80200) || defined(CPU_XSCALE_80321) || \
514 defined(CPU_XSCALE_PXA2X0) || defined(CPU_XSCALE_IXP425)
515 struct cpu_functions xscale_cpufuncs = {
516 /* CPU functions */
517
518 cpufunc_id, /* id */
519 xscale_cpwait, /* cpwait */
520
521 /* MMU functions */
522
523 xscale_control, /* control */
524 cpufunc_domains, /* domain */
525 xscale_setttb, /* setttb */
526 cpufunc_faultstatus, /* faultstatus */
527 cpufunc_faultaddress, /* faultaddress */
528
529 /* TLB functions */
530
531 armv4_tlb_flushID, /* tlb_flushID */
532 xscale_tlb_flushID_SE, /* tlb_flushID_SE */
533 armv4_tlb_flushI, /* tlb_flushI */
534 (void *)armv4_tlb_flushI, /* tlb_flushI_SE */
535 armv4_tlb_flushD, /* tlb_flushD */
536 armv4_tlb_flushD_SE, /* tlb_flushD_SE */
537
538 /* Cache operations */
539
540 xscale_cache_syncI, /* icache_sync_all */
541 xscale_cache_syncI_rng, /* icache_sync_range */
542
543 xscale_cache_purgeD, /* dcache_wbinv_all */
544 xscale_cache_purgeD_rng, /* dcache_wbinv_range */
545 xscale_cache_flushD_rng, /* dcache_inv_range */
546 xscale_cache_cleanD_rng, /* dcache_wb_range */
547
548 xscale_cache_purgeID, /* idcache_wbinv_all */
549 xscale_cache_purgeID_rng, /* idcache_wbinv_range */
550
551 /* Other functions */
552
553 cpufunc_nullop, /* flush_prefetchbuf */
554 armv4_drain_writebuf, /* drain_writebuf */
555 cpufunc_nullop, /* flush_brnchtgt_C */
556 (void *)cpufunc_nullop, /* flush_brnchtgt_E */
557
558 xscale_cpu_sleep, /* sleep */
559
560 /* Soft functions */
561
562 cpufunc_null_fixup, /* dataabt_fixup */
563 cpufunc_null_fixup, /* prefetchabt_fixup */
564
565 xscale_context_switch, /* context_switch */
566
567 xscale_setup /* cpu setup */
568 };
569 #endif
570 /* CPU_XSCALE_80200 || CPU_XSCALE_80321 || CPU_XSCALE_PXA2X0 || CPU_XSCALE_IXP425 */
571
572 /*
573 * Global constants also used by locore.s
574 */
575
576 struct cpu_functions cpufuncs;
577 u_int cputype;
578 u_int cpu_reset_needs_v4_MMU_disable; /* flag used in locore.s */
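/*
 * The rest of the kernel reaches these operations through the cpufuncs
 * table filled in by set_cpufuncs() below.  A minimal sketch of the
 * dispatch (the real wrapper macros live in <machine/cpufunc.h>):
 *
 *	#define cpu_setttb(t)		cpufuncs.cf_setttb(t)
 *	#define cpu_tlb_flushID()	cpufuncs.cf_tlb_flushID()
 *
 * The cf_* member names here are assumptions, based on the use of
 * cpufuncs.cf_dcache_inv_range in set_cpufuncs() below.
 */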
579
580 #if defined(CPU_ARM7TDMI) || defined(CPU_ARM8) || defined(CPU_ARM9) || \
581 defined (CPU_ARM10) || \
582 defined(CPU_XSCALE_80200) || defined(CPU_XSCALE_80321) || \
583 defined(CPU_XSCALE_PXA2X0) || defined(CPU_XSCALE_IXP425)
584 static void get_cachetype_cp15(void);
585
586 /* Additional cache information local to this file. Log2 of some of the
587 above numbers. */
588 static int arm_dcache_l2_nsets;
589 static int arm_dcache_l2_assoc;
590 static int arm_dcache_l2_linesize;
591
592 static void
593 get_cachetype_cp15()
594 {
595 u_int ctype, isize, dsize;
596 u_int multiplier;
597
598 __asm __volatile("mrc p15, 0, %0, c0, c0, 1"
599 : "=r" (ctype));
600
601 /*
602 * ...and thus spake the ARM ARM:
603 *
604 * If an <opcode2> value corresponding to an unimplemented or
605 * reserved ID register is encountered, the System Control
606 * processor returns the value of the main ID register.
607 */
608 if (ctype == cpufunc_id())
609 goto out;
610
611 if ((ctype & CPU_CT_S) == 0)
612 arm_pcache_unified = 1;
613
614 /*
615 * If you want to know how this code works, go read the ARM ARM.
616 */
617
618 arm_pcache_type = CPU_CT_CTYPE(ctype);
619
620 if (arm_pcache_unified == 0) {
621 isize = CPU_CT_ISIZE(ctype);
622 multiplier = (isize & CPU_CT_xSIZE_M) ? 3 : 2;
623 arm_picache_line_size = 1U << (CPU_CT_xSIZE_LEN(isize) + 3);
624 if (CPU_CT_xSIZE_ASSOC(isize) == 0) {
625 if (isize & CPU_CT_xSIZE_M)
626 arm_picache_line_size = 0; /* not present */
627 else
628 arm_picache_ways = 1;
629 } else {
630 arm_picache_ways = multiplier <<
631 (CPU_CT_xSIZE_ASSOC(isize) - 1);
632 }
633 arm_picache_size = multiplier << (CPU_CT_xSIZE_SIZE(isize) + 8);
634 }
635
636 dsize = CPU_CT_DSIZE(ctype);
637 multiplier = (dsize & CPU_CT_xSIZE_M) ? 3 : 2;
638 arm_pdcache_line_size = 1U << (CPU_CT_xSIZE_LEN(dsize) + 3);
639 if (CPU_CT_xSIZE_ASSOC(dsize) == 0) {
640 if (dsize & CPU_CT_xSIZE_M)
641 arm_pdcache_line_size = 0; /* not present */
642 else
643 arm_pdcache_ways = 1;
644 } else {
645 arm_pdcache_ways = multiplier <<
646 (CPU_CT_xSIZE_ASSOC(dsize) - 1);
647 }
648 arm_pdcache_size = multiplier << (CPU_CT_xSIZE_SIZE(dsize) + 8);
649
650 arm_dcache_align = arm_pdcache_line_size;
651
652 arm_dcache_l2_assoc = CPU_CT_xSIZE_ASSOC(dsize) + multiplier - 2;
653 arm_dcache_l2_linesize = CPU_CT_xSIZE_LEN(dsize) + 3;
654 arm_dcache_l2_nsets = 6 + CPU_CT_xSIZE_SIZE(dsize) -
655 CPU_CT_xSIZE_ASSOC(dsize) - CPU_CT_xSIZE_LEN(dsize);
656
657 out:
658 arm_dcache_align_mask = arm_dcache_align - 1;
659 }
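/*
 * A worked example of the decoding above, for a hypothetical D-cache
 * field with LEN = 2, M = 0, ASSOC = 5 and SIZE = 5:
 *
 *	multiplier            = 2			(M clear)
 *	arm_pdcache_line_size = 1 << (2 + 3)	= 32 bytes
 *	arm_pdcache_ways      = 2 << (5 - 1)	= 32 ways
 *	arm_pdcache_size      = 2 << (5 + 8)	= 16384 bytes
 *
 * which matches the 16KB/32-way/32-byte geometry listed for the
 * SA-110 in the cachetab[] table below.
 */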
660 #endif /* ARM7TDMI || ARM8 || ARM9 || ARM10 || XSCALE */
661
662 #if defined(CPU_SA110) || defined(CPU_SA1100) || defined(CPU_SA1110) || \
663 defined(CPU_IXP12X0)
664 /* Cache information for CPUs without cache type registers. */
665 struct cachetab {
666 u_int32_t ct_cpuid;
667 int ct_pcache_type;
668 int ct_pcache_unified;
669 int ct_pdcache_size;
670 int ct_pdcache_line_size;
671 int ct_pdcache_ways;
672 int ct_picache_size;
673 int ct_picache_line_size;
674 int ct_picache_ways;
675 };
676
677 struct cachetab cachetab[] = {
678 /* cpuid, cache type, u, dsiz, ls, wy, isiz, ls, wy */
679 /* XXX is this type right for SA-1? */
680 { CPU_ID_SA110, CPU_CT_CTYPE_WB1, 0, 16384, 32, 32, 16384, 32, 32 },
681 { CPU_ID_SA1100, CPU_CT_CTYPE_WB1, 0, 8192, 32, 32, 16384, 32, 32 },
682 { CPU_ID_SA1110, CPU_CT_CTYPE_WB1, 0, 8192, 32, 32, 16384, 32, 32 },
683 { CPU_ID_IXP1200, CPU_CT_CTYPE_WB1, 0, 16384, 32, 32, 16384, 32, 32 }, /* XXX */
684 { 0, 0, 0, 0, 0, 0, 0, 0}
685 };
686
687 static void get_cachetype_table(void);
688
689 static void
690 get_cachetype_table()
691 {
692 int i;
693 u_int32_t cpuid = cpufunc_id();
694
695 for (i = 0; cachetab[i].ct_cpuid != 0; i++) {
696 if (cachetab[i].ct_cpuid == (cpuid & CPU_ID_CPU_MASK)) {
697 arm_pcache_type = cachetab[i].ct_pcache_type;
698 arm_pcache_unified = cachetab[i].ct_pcache_unified;
699 arm_pdcache_size = cachetab[i].ct_pdcache_size;
700 arm_pdcache_line_size =
701 cachetab[i].ct_pdcache_line_size;
702 arm_pdcache_ways = cachetab[i].ct_pdcache_ways;
703 arm_picache_size = cachetab[i].ct_picache_size;
704 arm_picache_line_size =
705 cachetab[i].ct_picache_line_size;
706 arm_picache_ways = cachetab[i].ct_picache_ways;
707 }
708 }
709 arm_dcache_align = arm_pdcache_line_size;
710
711 arm_dcache_align_mask = arm_dcache_align - 1;
712 }
713
714 #endif /* SA110 || SA1100 || SA1110 || IXP12X0 */
715
716 /*
717 * Cannot panic here as we may not have a console yet ...
718 */
719
720 int
721 set_cpufuncs()
722 {
723 cputype = cpufunc_id();
724 cputype &= CPU_ID_CPU_MASK;
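	/*
	 * CPU_ID_CPU_MASK is assumed to strip the revision field from the
	 * main ID register, so the comparisons below match any stepping;
	 * branches that care about the stepping (e.g. the i80200 errata
	 * check) re-read cpufunc_id() and mask the revision themselves.
	 */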
725
726 /*
727 * NOTE: cpu_do_powersave defaults to off. If we encounter a
728 * CPU type where we want to use it by default, then we set it.
729 */
730
731 #ifdef CPU_ARM7TDMI
732 if ((cputype & CPU_ID_IMPLEMENTOR_MASK) == CPU_ID_ARM_LTD &&
733 CPU_ID_IS7(cputype) &&
734 (cputype & CPU_ID_7ARCH_MASK) == CPU_ID_7ARCH_V4T) {
735 cpufuncs = arm7tdmi_cpufuncs;
736 cpu_reset_needs_v4_MMU_disable = 0;
737 get_cachetype_cp15();
738 pmap_pte_init_generic();
739 return 0;
740 }
741 #endif
742 #ifdef CPU_ARM8
743 if ((cputype & CPU_ID_IMPLEMENTOR_MASK) == CPU_ID_ARM_LTD &&
744 (cputype & 0x0000f000) == 0x00008000) {
745 cpufuncs = arm8_cpufuncs;
746 cpu_reset_needs_v4_MMU_disable = 0; /* XXX correct? */
747 get_cachetype_cp15();
748 pmap_pte_init_arm8();
749 return 0;
750 }
751 #endif /* CPU_ARM8 */
752 #ifdef CPU_ARM9
753 if (((cputype & CPU_ID_IMPLEMENTOR_MASK) == CPU_ID_ARM_LTD ||
754 (cputype & CPU_ID_IMPLEMENTOR_MASK) == CPU_ID_TI) &&
755 (cputype & 0x0000f000) == 0x00009000) {
756 cpufuncs = arm9_cpufuncs;
757 cpu_reset_needs_v4_MMU_disable = 1; /* V4 or higher */
758 get_cachetype_cp15();
759 arm9_dcache_sets_inc = 1U << arm_dcache_l2_linesize;
760 arm9_dcache_sets_max = (1U << (arm_dcache_l2_linesize +
761 arm_dcache_l2_nsets)) - arm9_dcache_sets_inc;
762 arm9_dcache_index_inc = 1U << (32 - arm_dcache_l2_assoc);
763 arm9_dcache_index_max = 0U - arm9_dcache_index_inc;
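		/*
		 * These four values are assumed to drive the set/way loops
		 * in the arm9 cache routines: arm9_dcache_sets_inc steps one
		 * cache line at a time through the set field, sets_max bounds
		 * that loop, and index_inc and index_max walk the way number
		 * held in the top bits of the clean/invalidate-by-set/index
		 * operand.
		 */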
764 #ifdef ARM9_CACHE_WRITE_THROUGH
765 pmap_pte_init_arm9();
766 #else
767 pmap_pte_init_generic();
768 #endif
769 return 0;
770 }
771 #endif /* CPU_ARM9 */
772 #ifdef CPU_ARM10
773 if (/* cputype == CPU_ID_ARM1020T || */
774 cputype == CPU_ID_ARM1020E) {
775 /*
776 		 * Select write-through caching (this isn't really an
777 * option on ARM1020T).
778 */
779 cpufuncs = arm10_cpufuncs;
780 cpu_reset_needs_v4_MMU_disable = 1; /* V4 or higher */
781 get_cachetype_cp15();
782 arm10_dcache_sets_inc = 1U << arm_dcache_l2_linesize;
783 arm10_dcache_sets_max =
784 (1U << (arm_dcache_l2_linesize + arm_dcache_l2_nsets)) -
785 arm10_dcache_sets_inc;
786 arm10_dcache_index_inc = 1U << (32 - arm_dcache_l2_assoc);
787 arm10_dcache_index_max = 0U - arm10_dcache_index_inc;
788 pmap_pte_init_generic();
789 return 0;
790 }
791 #endif /* CPU_ARM10 */
792 #ifdef CPU_SA110
793 if (cputype == CPU_ID_SA110) {
794 cpufuncs = sa110_cpufuncs;
795 cpu_reset_needs_v4_MMU_disable = 1; /* SA needs it */
796 get_cachetype_table();
797 pmap_pte_init_sa1();
798 return 0;
799 }
800 #endif /* CPU_SA110 */
801 #ifdef CPU_SA1100
802 if (cputype == CPU_ID_SA1100) {
803 cpufuncs = sa11x0_cpufuncs;
804 cpu_reset_needs_v4_MMU_disable = 1; /* SA needs it */
805 get_cachetype_table();
806 pmap_pte_init_sa1();
807 /* Use powersave on this CPU. */
808 cpu_do_powersave = 1;
809
810 return 0;
811 }
812 #endif /* CPU_SA1100 */
813 #ifdef CPU_SA1110
814 if (cputype == CPU_ID_SA1110) {
815 cpufuncs = sa11x0_cpufuncs;
816 cpu_reset_needs_v4_MMU_disable = 1; /* SA needs it */
817 get_cachetype_table();
818 pmap_pte_init_sa1();
819 /* Use powersave on this CPU. */
820 cpu_do_powersave = 1;
821
822 return 0;
823 }
824 #endif /* CPU_SA1110 */
825 #ifdef CPU_IXP12X0
826 if (cputype == CPU_ID_IXP1200) {
827 cpufuncs = ixp12x0_cpufuncs;
828 cpu_reset_needs_v4_MMU_disable = 1;
829 get_cachetype_table();
830 pmap_pte_init_sa1();
831 return 0;
832 }
833 #endif /* CPU_IXP12X0 */
834 #ifdef CPU_XSCALE_80200
835 if (cputype == CPU_ID_80200) {
836 int rev = cpufunc_id() & CPU_ID_REVISION_MASK;
837
838 i80200_icu_init();
839
840 /*
841 * Reset the Performance Monitoring Unit to a
842 * pristine state:
843 * - CCNT, PMN0, PMN1 reset to 0
844 * - overflow indications cleared
845 * - all counters disabled
846 */
847 __asm __volatile("mcr p14, 0, %0, c0, c0, 0"
848 :
849 : "r" (PMNC_P|PMNC_C|PMNC_PMN0_IF|PMNC_PMN1_IF|
850 PMNC_CC_IF));
851
852 #if defined(XSCALE_CCLKCFG)
853 /*
854 * Crank CCLKCFG to maximum legal value.
855 */
856 __asm __volatile ("mcr p14, 0, %0, c6, c0, 0"
857 :
858 : "r" (XSCALE_CCLKCFG));
859 #endif
860
861 /*
862 * XXX Disable ECC in the Bus Controller Unit; we
863 * don't really support it, yet. Clear any pending
864 * error indications.
865 */
866 __asm __volatile("mcr p13, 0, %0, c0, c1, 0"
867 :
868 : "r" (BCUCTL_E0|BCUCTL_E1|BCUCTL_EV));
869
870 cpufuncs = xscale_cpufuncs;
871 #if defined(PERFCTRS)
872 xscale_pmu_init();
873 #endif
874
875 /*
876 * i80200 errata: Step-A0 and A1 have a bug where
877 * D$ dirty bits are not cleared on "invalidate by
878 * address".
879 *
880 * Workaround: Clean cache line before invalidating.
881 */
882 if (rev == 0 || rev == 1)
883 cpufuncs.cf_dcache_inv_range = xscale_cache_purgeD_rng;
884
885 cpu_reset_needs_v4_MMU_disable = 1; /* XScale needs it */
886 get_cachetype_cp15();
887 pmap_pte_init_xscale();
888 return 0;
889 }
890 #endif /* CPU_XSCALE_80200 */
891 #ifdef CPU_XSCALE_80321
892 if (cputype == CPU_ID_80321_400 || cputype == CPU_ID_80321_600 ||
893 cputype == CPU_ID_80321_400_B0 || cputype == CPU_ID_80321_600_B0) {
894
895 /*
896 * Reset the Performance Monitoring Unit to a
897 * pristine state:
898 * - CCNT, PMN0, PMN1 reset to 0
899 * - overflow indications cleared
900 * - all counters disabled
901 */
902 __asm __volatile("mcr p14, 0, %0, c0, c0, 0"
903 :
904 : "r" (PMNC_P|PMNC_C|PMNC_PMN0_IF|PMNC_PMN1_IF|
905 PMNC_CC_IF));
906
907 cpufuncs = xscale_cpufuncs;
908 #if defined(PERFCTRS)
909 xscale_pmu_init();
910 #endif
911
912 cpu_reset_needs_v4_MMU_disable = 1; /* XScale needs it */
913 get_cachetype_cp15();
914 pmap_pte_init_xscale();
915 return 0;
916 }
917 #endif /* CPU_XSCALE_80321 */
918 #ifdef CPU_XSCALE_PXA2X0
919 	/* ignore the core revision when matching PXA2xx CPUs */
920 if ((cputype & ~CPU_ID_XSCALE_COREREV_MASK) == CPU_ID_PXA250 ||
921 (cputype & ~CPU_ID_XSCALE_COREREV_MASK) == CPU_ID_PXA210) {
922
923 cpufuncs = xscale_cpufuncs;
924 #if defined(PERFCTRS)
925 xscale_pmu_init();
926 #endif
927
928 cpu_reset_needs_v4_MMU_disable = 1; /* XScale needs it */
929 get_cachetype_cp15();
930 pmap_pte_init_xscale();
931
932 /* Use powersave on this CPU. */
933 cpu_do_powersave = 1;
934
935 return 0;
936 }
937 #endif /* CPU_XSCALE_PXA2X0 */
938 #ifdef CPU_XSCALE_IXP425
939 if (cputype == CPU_ID_IXP425_533 || cputype == CPU_ID_IXP425_400 ||
940 cputype == CPU_ID_IXP425_266) {
941 ixp425_icu_init();
942
943 cpufuncs = xscale_cpufuncs;
944 #if defined(PERFCTRS)
945 xscale_pmu_init();
946 #endif
947
948 cpu_reset_needs_v4_MMU_disable = 1; /* XScale needs it */
949 get_cachetype_cp15();
950 pmap_pte_init_xscale();
951
952 return 0;
953 }
954 #endif /* CPU_XSCALE_IXP425 */
955 /*
956 * Bzzzz. And the answer was ...
957 */
958 panic("No support for this CPU type (%08x) in kernel", cputype);
959 return(ARCHITECTURE_NOT_PRESENT);
960 }
961
962 /*
963 * Fixup routines for data and prefetch aborts.
964 *
965 * Several compile time symbols are used
966 *
967 * DEBUG_FAULT_CORRECTION - Print debugging information during the
968 * correction of registers after a fault.
969  *	ARM6_LATE_ABORT - ARM6 supports both early and late aborts;
970  *			  when defined, late aborts are used.
971 */
972
973
974 /*
975 * Null abort fixup routine.
976 * For use when no fixup is required.
977 */
978 int
979 cpufunc_null_fixup(arg)
980 void *arg;
981 {
982 return(ABORT_FIXUP_OK);
983 }
984
985
986 #if defined(CPU_ARM7TDMI)
987
988 #ifdef DEBUG_FAULT_CORRECTION
989 #define DFC_PRINTF(x) printf x
990 #define DFC_DISASSEMBLE(x) disassemble(x)
991 #else
992 #define DFC_PRINTF(x) /* nothing */
993 #define DFC_DISASSEMBLE(x) /* nothing */
994 #endif
995
996 /*
997 * "Early" data abort fixup.
998 *
999 * For ARM2, ARM2as, ARM3 and ARM6 (in early-abort mode). Also used
1000 * indirectly by ARM6 (in late-abort mode) and ARM7[TDMI].
1001 *
1002 * In early aborts, we may have to fix up LDM, STM, LDC and STC.
1003 */
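/*
 * A worked example, assuming the early-abort (base-updated) model:
 * if "ldmia r4!, {r0-r3}" aborts, four registers were named, the U
 * bit (1 << 23) is set, and writeback has already advanced r4 by
 * 4 * 4 = 16 bytes; the code below therefore subtracts count * 4
 * from registers[base] to restore r4.
 */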
1004 int
1005 early_abort_fixup(arg)
1006 void *arg;
1007 {
1008 trapframe_t *frame = arg;
1009 u_int fault_pc;
1010 u_int fault_instruction;
1011 int saved_lr = 0;
1012
1013 if ((frame->tf_spsr & PSR_MODE) == PSR_SVC32_MODE) {
1014
1015 /* Ok an abort in SVC mode */
1016
1017 /*
1018 * Copy the SVC r14 into the usr r14 - The usr r14 is garbage
1019 * as the fault happened in svc mode but we need it in the
1020 * usr slot so we can treat the registers as an array of ints
1021 * during fixing.
1022 		 * NOTE: the PC occupies the r15 slot in that array, but
1023 		 * writeback to r15 is not allowed, so it is never touched.
1024 * Doing it like this is more efficient than trapping this
1025 * case in all possible locations in the following fixup code.
1026 */
1027
1028 saved_lr = frame->tf_usr_lr;
1029 frame->tf_usr_lr = frame->tf_svc_lr;
1030
1031 /*
1032 * Note the trapframe does not have the SVC r13 so a fault
1033 * from an instruction with writeback to r13 in SVC mode is
1034 * not allowed. This should not happen as the kstack is
1035 * always valid.
1036 */
1037 }
1038
1039 /* Get fault address and status from the CPU */
1040
1041 fault_pc = frame->tf_pc;
1042 fault_instruction = *((volatile unsigned int *)fault_pc);
1043
1044 /* Decode the fault instruction and fix the registers as needed */
1045
1046 if ((fault_instruction & 0x0e000000) == 0x08000000) {
1047 int base;
1048 int loop;
1049 int count;
1050 int *registers = &frame->tf_r0;
1051
1052 DFC_PRINTF(("LDM/STM\n"));
1053 DFC_DISASSEMBLE(fault_pc);
1054 if (fault_instruction & (1 << 21)) {
1055 DFC_PRINTF(("This instruction must be corrected\n"));
1056 base = (fault_instruction >> 16) & 0x0f;
1057 if (base == 15)
1058 return ABORT_FIXUP_FAILED;
1059 /* Count registers transferred */
1060 count = 0;
1061 for (loop = 0; loop < 16; ++loop) {
1062 if (fault_instruction & (1<<loop))
1063 ++count;
1064 }
1065 DFC_PRINTF(("%d registers used\n", count));
1066 DFC_PRINTF(("Corrected r%d by %d bytes ",
1067 base, count * 4));
1068 if (fault_instruction & (1 << 23)) {
1069 DFC_PRINTF(("down\n"));
1070 registers[base] -= count * 4;
1071 } else {
1072 DFC_PRINTF(("up\n"));
1073 registers[base] += count * 4;
1074 }
1075 }
1076 } else if ((fault_instruction & 0x0e000000) == 0x0c000000) {
1077 int base;
1078 int offset;
1079 int *registers = &frame->tf_r0;
1080
1081 /* REGISTER CORRECTION IS REQUIRED FOR THESE INSTRUCTIONS */
1082
1083 DFC_DISASSEMBLE(fault_pc);
1084
1085 /* Only need to fix registers if write back is turned on */
1086
1087 if ((fault_instruction & (1 << 21)) != 0) {
1088 base = (fault_instruction >> 16) & 0x0f;
1089 if (base == 13 &&
1090 (frame->tf_spsr & PSR_MODE) == PSR_SVC32_MODE)
1091 return ABORT_FIXUP_FAILED;
1092 if (base == 15)
1093 return ABORT_FIXUP_FAILED;
1094
1095 offset = (fault_instruction & 0xff) << 2;
1096 DFC_PRINTF(("r%d=%08x\n", base, registers[base]));
1097 if ((fault_instruction & (1 << 23)) != 0)
1098 offset = -offset;
1099 registers[base] += offset;
1100 DFC_PRINTF(("r%d=%08x\n", base, registers[base]));
1101 }
1102 	} else if ((fault_instruction & 0x0e000000) == 0x0c000000)	/* XXX unreachable: same test as the LDC/STC branch above */
1103 return ABORT_FIXUP_FAILED;
1104
1105 if ((frame->tf_spsr & PSR_MODE) == PSR_SVC32_MODE) {
1106
1107 /* Ok an abort in SVC mode */
1108
1109 /*
1110 * Copy the SVC r14 into the usr r14 - The usr r14 is garbage
1111 * as the fault happened in svc mode but we need it in the
1112 * usr slot so we can treat the registers as an array of ints
1113 * during fixing.
1114 		 * NOTE: the PC occupies the r15 slot in that array, but
1115 		 * writeback to r15 is not allowed, so it is never touched.
1116 * Doing it like this is more efficient than trapping this
1117 * case in all possible locations in the prior fixup code.
1118 */
1119
1120 frame->tf_svc_lr = frame->tf_usr_lr;
1121 frame->tf_usr_lr = saved_lr;
1122
1123 /*
1124 * Note the trapframe does not have the SVC r13 so a fault
1125 * from an instruction with writeback to r13 in SVC mode is
1126 * not allowed. This should not happen as the kstack is
1127 * always valid.
1128 */
1129 }
1130
1131 return(ABORT_FIXUP_OK);
1132 }
1133 #endif	/* CPU_ARM7TDMI */
1134
1135
1136 #if defined(CPU_ARM7TDMI)
1137 /*
1138 * "Late" (base updated) data abort fixup
1139 *
1140 * For ARM6 (in late-abort mode) and ARM7.
1141 *
1142 * In this model, all data-transfer instructions need fixing up. We defer
1143 * LDM, STM, LDC and STC fixup to the early-abort handler.
1144 */
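/*
 * A worked example for the shifted-register case handled below, with
 * hypothetical operands: if the post-indexed "ldr r0, [r1], r2, lsl #2"
 * aborts, writeback has already added (r2 << 2) to r1.  The code
 * recomputes that offset from the shift field, negates it because the
 * U bit (1 << 23) is set (the instruction added the offset), and adds
 * the result back to registers[base], restoring the original r1.
 */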
1145 int
1146 late_abort_fixup(arg)
1147 void *arg;
1148 {
1149 trapframe_t *frame = arg;
1150 u_int fault_pc;
1151 u_int fault_instruction;
1152 int saved_lr = 0;
1153
1154 if ((frame->tf_spsr & PSR_MODE) == PSR_SVC32_MODE) {
1155
1156 /* Ok an abort in SVC mode */
1157
1158 /*
1159 * Copy the SVC r14 into the usr r14 - The usr r14 is garbage
1160 * as the fault happened in svc mode but we need it in the
1161 * usr slot so we can treat the registers as an array of ints
1162 * during fixing.
1163 		 * NOTE: the PC occupies the r15 slot in that array, but
1164 		 * writeback to r15 is not allowed, so it is never touched.
1165 * Doing it like this is more efficient than trapping this
1166 * case in all possible locations in the following fixup code.
1167 */
1168
1169 saved_lr = frame->tf_usr_lr;
1170 frame->tf_usr_lr = frame->tf_svc_lr;
1171
1172 /*
1173 * Note the trapframe does not have the SVC r13 so a fault
1174 * from an instruction with writeback to r13 in SVC mode is
1175 * not allowed. This should not happen as the kstack is
1176 * always valid.
1177 */
1178 }
1179
1180 /* Get fault address and status from the CPU */
1181
1182 fault_pc = frame->tf_pc;
1183 fault_instruction = *((volatile unsigned int *)fault_pc);
1184
1185 /* Decode the fault instruction and fix the registers as needed */
1186
1187 	/* Was it a swap instruction? */
1188
1189 if ((fault_instruction & 0x0fb00ff0) == 0x01000090) {
1190 DFC_DISASSEMBLE(fault_pc);
1191 } else if ((fault_instruction & 0x0c000000) == 0x04000000) {
1192
1193 		/* Was it an ldr/str instruction? */
1194 /* This is for late abort only */
1195
1196 int base;
1197 int offset;
1198 int *registers = &frame->tf_r0;
1199
1200 DFC_DISASSEMBLE(fault_pc);
1201
1202 /* This is for late abort only */
1203
1204 if ((fault_instruction & (1 << 24)) == 0
1205 || (fault_instruction & (1 << 21)) != 0) {
1206 			/* post-indexed, or pre-indexed with writeback: the base register was updated */
1207
1208 base = (fault_instruction >> 16) & 0x0f;
1209 if (base == 13 &&
1210 (frame->tf_spsr & PSR_MODE) == PSR_SVC32_MODE)
1211 return ABORT_FIXUP_FAILED;
1212 if (base == 15)
1213 return ABORT_FIXUP_FAILED;
1214 DFC_PRINTF(("late abt fix: r%d=%08x : ",
1215 base, registers[base]));
1216 if ((fault_instruction & (1 << 25)) == 0) {
1217 /* Immediate offset - easy */
1218
1219 offset = fault_instruction & 0xfff;
1220 if ((fault_instruction & (1 << 23)))
1221 offset = -offset;
1222 registers[base] += offset;
1223 DFC_PRINTF(("imm=%08x ", offset));
1224 } else {
1225 /* offset is a shifted register */
1226 int shift;
1227
1228 offset = fault_instruction & 0x0f;
1229 if (offset == base)
1230 return ABORT_FIXUP_FAILED;
1231
1232 /*
1233 * Register offset - hard we have to
1234 * cope with shifts !
1235 */
1236 offset = registers[offset];
1237
1238 if ((fault_instruction & (1 << 4)) == 0)
1239 /* shift with amount */
1240 shift = (fault_instruction >> 7) & 0x1f;
1241 else {
1242 /* shift with register */
1243 if ((fault_instruction & (1 << 7)) != 0)
1244 /* undefined for now so bail out */
1245 return ABORT_FIXUP_FAILED;
1246 shift = ((fault_instruction >> 8) & 0xf);
1247 if (base == shift)
1248 return ABORT_FIXUP_FAILED;
1249 DFC_PRINTF(("shift reg=%d ", shift));
1250 shift = registers[shift];
1251 }
1252 DFC_PRINTF(("shift=%08x ", shift));
1253 switch (((fault_instruction >> 5) & 0x3)) {
1254 case 0 : /* Logical left */
1255 offset = (int)(((u_int)offset) << shift);
1256 break;
1257 case 1 : /* Logical Right */
1258 if (shift == 0) shift = 32;
1259 offset = (int)(((u_int)offset) >> shift);
1260 break;
1261 case 2 : /* Arithmetic Right */
1262 if (shift == 0) shift = 32;
1263 offset = (int)(((int)offset) >> shift);
1264 break;
1265 			case 3 : /* Rotate right (ror or rrx) */
1266 return ABORT_FIXUP_FAILED;
1267 break;
1268 }
1269
1270 DFC_PRINTF(("abt: fixed LDR/STR with "
1271 "register offset\n"));
1272 if ((fault_instruction & (1 << 23)))
1273 offset = -offset;
1274 DFC_PRINTF(("offset=%08x ", offset));
1275 registers[base] += offset;
1276 }
1277 DFC_PRINTF(("r%d=%08x\n", base, registers[base]));
1278 }
1279 }
1280
1281 if ((frame->tf_spsr & PSR_MODE) == PSR_SVC32_MODE) {
1282
1283 /* Ok an abort in SVC mode */
1284
1285 /*
1286 * Copy the SVC r14 into the usr r14 - The usr r14 is garbage
1287 * as the fault happened in svc mode but we need it in the
1288 * usr slot so we can treat the registers as an array of ints
1289 * during fixing.
1290 		 * NOTE: the PC occupies the r15 slot in that array, but
1291 		 * writeback to r15 is not allowed, so it is never touched.
1292 * Doing it like this is more efficient than trapping this
1293 * case in all possible locations in the prior fixup code.
1294 */
1295
1296 frame->tf_svc_lr = frame->tf_usr_lr;
1297 frame->tf_usr_lr = saved_lr;
1298
1299 /*
1300 * Note the trapframe does not have the SVC r13 so a fault
1301 * from an instruction with writeback to r13 in SVC mode is
1302 * not allowed. This should not happen as the kstack is
1303 * always valid.
1304 */
1305 }
1306
1307 /*
1308 * Now let the early-abort fixup routine have a go, in case it
1309 * was an LDM, STM, LDC or STC that faulted.
1310 */
1311
1312 return early_abort_fixup(arg);
1313 }
1314 #endif /* CPU_ARM7TDMI */
1315
1316 /*
1317 * CPU Setup code
1318 */
1319
1320 #if defined(CPU_ARM7TDMI) || defined(CPU_ARM8) || defined (CPU_ARM9) || \
1321 defined(CPU_SA110) || defined(CPU_SA1100) || defined(CPU_SA1110) || \
1322 defined(CPU_XSCALE_80200) || defined(CPU_XSCALE_80321) || \
1323 defined(CPU_XSCALE_PXA2X0) || defined(CPU_XSCALE_IXP425)
1324
1325 #define IGN 0
1326 #define OR 1
1327 #define BIC 2
1328
1329 struct cpu_option {
1330 char *co_name;
1331 int co_falseop;
1332 int co_trueop;
1333 int co_value;
1334 };
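/*
 * Each entry names a boot argument: when the argument parses true,
 * co_trueop says what to do with co_value (OR it into the control
 * word, or BIC it out), and co_falseop does the same for a false
 * value.  IGN means that polarity leaves the control word alone.
 */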
1335
1336 static u_int parse_cpu_options(char *, struct cpu_option *, u_int);
1337
1338 static u_int
1339 parse_cpu_options(args, optlist, cpuctrl)
1340 char *args;
1341 struct cpu_option *optlist;
1342 u_int cpuctrl;
1343 {
1344 int integer;
1345
1346 if (args == NULL)
1347 return(cpuctrl);
1348
1349 while (optlist->co_name) {
1350 if (get_bootconf_option(args, optlist->co_name,
1351 BOOTOPT_TYPE_BOOLEAN, &integer)) {
1352 if (integer) {
1353 if (optlist->co_trueop == OR)
1354 cpuctrl |= optlist->co_value;
1355 else if (optlist->co_trueop == BIC)
1356 cpuctrl &= ~optlist->co_value;
1357 } else {
1358 if (optlist->co_falseop == OR)
1359 cpuctrl |= optlist->co_value;
1360 else if (optlist->co_falseop == BIC)
1361 cpuctrl &= ~optlist->co_value;
1362 }
1363 }
1364 ++optlist;
1365 }
1366 return(cpuctrl);
1367 }
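/*
 * For example (hypothetical boot args): given args = "cpu.nocache=1"
 * and the arm678_options table below, get_bootconf_option() reports
 * true with integer = 1, co_trueop is BIC, and the IDC enable bit is
 * cleared from the returned control word.
 */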
1368 #endif /* CPU_ARM7TDMI || CPU_ARM8 || CPU_ARM9 || CPU_SA110 || CPU_SA1100 || CPU_SA1110 || XSCALE */
1369
1370 #if defined(CPU_ARM7TDMI) || defined(CPU_ARM8)
1371 struct cpu_option arm678_options[] = {
1372 #ifdef COMPAT_12
1373 { "nocache", IGN, BIC, CPU_CONTROL_IDC_ENABLE },
1374 { "nowritebuf", IGN, BIC, CPU_CONTROL_WBUF_ENABLE },
1375 #endif /* COMPAT_12 */
1376 { "cpu.cache", BIC, OR, CPU_CONTROL_IDC_ENABLE },
1377 { "cpu.nocache", OR, BIC, CPU_CONTROL_IDC_ENABLE },
1378 { "cpu.writebuf", BIC, OR, CPU_CONTROL_WBUF_ENABLE },
1379 { "cpu.nowritebuf", OR, BIC, CPU_CONTROL_WBUF_ENABLE },
1380 { NULL, IGN, IGN, 0 }
1381 };
1382
1383 #endif	/* CPU_ARM7TDMI || CPU_ARM8 */
1384
1385 #ifdef CPU_ARM7TDMI
1386 struct cpu_option arm7tdmi_options[] = {
1387 { "arm7.cache", BIC, OR, CPU_CONTROL_IDC_ENABLE },
1388 { "arm7.nocache", OR, BIC, CPU_CONTROL_IDC_ENABLE },
1389 { "arm7.writebuf", BIC, OR, CPU_CONTROL_WBUF_ENABLE },
1390 { "arm7.nowritebuf", OR, BIC, CPU_CONTROL_WBUF_ENABLE },
1391 #ifdef COMPAT_12
1392 { "fpaclk2", BIC, OR, CPU_CONTROL_CPCLK },
1393 #endif /* COMPAT_12 */
1394 { "arm700.fpaclk", BIC, OR, CPU_CONTROL_CPCLK },
1395 { NULL, IGN, IGN, 0 }
1396 };
1397
1398 void
1399 arm7tdmi_setup(args)
1400 char *args;
1401 {
1402 int cpuctrl;
1403
1404 cpuctrl = CPU_CONTROL_MMU_ENABLE | CPU_CONTROL_32BP_ENABLE
1405 | CPU_CONTROL_32BD_ENABLE | CPU_CONTROL_SYST_ENABLE
1406 | CPU_CONTROL_IDC_ENABLE | CPU_CONTROL_WBUF_ENABLE;
1407
1408 cpuctrl = parse_cpu_options(args, arm678_options, cpuctrl);
1409 cpuctrl = parse_cpu_options(args, arm7tdmi_options, cpuctrl);
1410
1411 #ifdef __ARMEB__
1412 cpuctrl |= CPU_CONTROL_BEND_ENABLE;
1413 #endif
1414
1415 /* Clear out the cache */
1416 cpu_idcache_wbinv_all();
1417
1418 /* Set the control register */
1419 ctrl = cpuctrl;
1420 cpu_control(0xffffffff, cpuctrl);
1421 }
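/*
 * cpu_control(mask, val) is assumed to read-modify-write the CP15
 * control register, changing only the bits set in mask; the
 * 0xffffffff mask above therefore installs cpuctrl wholesale, while
 * the commented-out cpu_control(cpuctrlmask, cpuctrl) calls in the
 * other setup routines would restrict the update instead.
 */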
1422 #endif /* CPU_ARM7TDMI */
1423
1424 #ifdef CPU_ARM8
1425 struct cpu_option arm8_options[] = {
1426 { "arm8.cache", BIC, OR, CPU_CONTROL_IDC_ENABLE },
1427 { "arm8.nocache", OR, BIC, CPU_CONTROL_IDC_ENABLE },
1428 { "arm8.writebuf", BIC, OR, CPU_CONTROL_WBUF_ENABLE },
1429 { "arm8.nowritebuf", OR, BIC, CPU_CONTROL_WBUF_ENABLE },
1430 #ifdef COMPAT_12
1431 { "branchpredict", BIC, OR, CPU_CONTROL_BPRD_ENABLE },
1432 #endif /* COMPAT_12 */
1433 { "cpu.branchpredict", BIC, OR, CPU_CONTROL_BPRD_ENABLE },
1434 { "arm8.branchpredict", BIC, OR, CPU_CONTROL_BPRD_ENABLE },
1435 { NULL, IGN, IGN, 0 }
1436 };
1437
1438 void
1439 arm8_setup(args)
1440 char *args;
1441 {
1442 int integer;
1443 int cpuctrl, cpuctrlmask;
1444 int clocktest;
1445 int setclock = 0;
1446
1447 cpuctrl = CPU_CONTROL_MMU_ENABLE | CPU_CONTROL_32BP_ENABLE
1448 | CPU_CONTROL_32BD_ENABLE | CPU_CONTROL_SYST_ENABLE
1449 | CPU_CONTROL_IDC_ENABLE | CPU_CONTROL_WBUF_ENABLE;
1450 cpuctrlmask = CPU_CONTROL_MMU_ENABLE | CPU_CONTROL_32BP_ENABLE
1451 | CPU_CONTROL_32BD_ENABLE | CPU_CONTROL_SYST_ENABLE
1452 | CPU_CONTROL_IDC_ENABLE | CPU_CONTROL_WBUF_ENABLE
1453 | CPU_CONTROL_BPRD_ENABLE | CPU_CONTROL_ROM_ENABLE
1454 | CPU_CONTROL_BEND_ENABLE | CPU_CONTROL_AFLT_ENABLE;
1455
1456 #ifndef ARM32_DISABLE_ALIGNMENT_FAULTS
1457 cpuctrl |= CPU_CONTROL_AFLT_ENABLE;
1458 #endif
1459
1460 cpuctrl = parse_cpu_options(args, arm678_options, cpuctrl);
1461 cpuctrl = parse_cpu_options(args, arm8_options, cpuctrl);
1462
1463 #ifdef __ARMEB__
1464 cpuctrl |= CPU_CONTROL_BEND_ENABLE;
1465 #endif
1466
1467 /* Get clock configuration */
1468 clocktest = arm8_clock_config(0, 0) & 0x0f;
1469
1470 /* Special ARM8 clock and test configuration */
1471 if (get_bootconf_option(args, "arm8.clock.reset", BOOTOPT_TYPE_BOOLEAN, &integer)) {
1472 clocktest = 0;
1473 setclock = 1;
1474 }
1475 if (get_bootconf_option(args, "arm8.clock.dynamic", BOOTOPT_TYPE_BOOLEAN, &integer)) {
1476 if (integer)
1477 clocktest |= 0x01;
1478 else
1479 clocktest &= ~(0x01);
1480 setclock = 1;
1481 }
1482 if (get_bootconf_option(args, "arm8.clock.sync", BOOTOPT_TYPE_BOOLEAN, &integer)) {
1483 if (integer)
1484 clocktest |= 0x02;
1485 else
1486 clocktest &= ~(0x02);
1487 setclock = 1;
1488 }
1489 if (get_bootconf_option(args, "arm8.clock.fast", BOOTOPT_TYPE_BININT, &integer)) {
1490 		clocktest = (clocktest & ~0xc0) | (integer & 3) << 2;	/* XXX mask ~0xc0 vs. shift << 2 look inconsistent */
1491 setclock = 1;
1492 }
1493 if (get_bootconf_option(args, "arm8.test", BOOTOPT_TYPE_BININT, &integer)) {
1494 clocktest |= (integer & 7) << 5;
1495 setclock = 1;
1496 }
1497
1498 /* Clear out the cache */
1499 cpu_idcache_wbinv_all();
1500
1501 /* Set the control register */
1502 ctrl = cpuctrl;
1503 cpu_control(0xffffffff, cpuctrl);
1504
1505 /* Set the clock/test register */
1506 if (setclock)
1507 arm8_clock_config(0x7f, clocktest);
1508 }
1509 #endif /* CPU_ARM8 */
1510
1511 #ifdef CPU_ARM9
1512 struct cpu_option arm9_options[] = {
1513 { "cpu.cache", BIC, OR, (CPU_CONTROL_IC_ENABLE | CPU_CONTROL_DC_ENABLE) },
1514 { "cpu.nocache", OR, BIC, (CPU_CONTROL_IC_ENABLE | CPU_CONTROL_DC_ENABLE) },
1515 { "arm9.cache", BIC, OR, (CPU_CONTROL_IC_ENABLE | CPU_CONTROL_DC_ENABLE) },
1516 { "arm9.icache", BIC, OR, CPU_CONTROL_IC_ENABLE },
1517 { "arm9.dcache", BIC, OR, CPU_CONTROL_DC_ENABLE },
1518 { "cpu.writebuf", BIC, OR, CPU_CONTROL_WBUF_ENABLE },
1519 { "cpu.nowritebuf", OR, BIC, CPU_CONTROL_WBUF_ENABLE },
1520 { "arm9.writebuf", BIC, OR, CPU_CONTROL_WBUF_ENABLE },
1521 { NULL, IGN, IGN, 0 }
1522 };
1523
1524 void
1525 arm9_setup(args)
1526 char *args;
1527 {
1528 int cpuctrl, cpuctrlmask;
1529
1530 cpuctrl = CPU_CONTROL_MMU_ENABLE | CPU_CONTROL_32BP_ENABLE
1531 | CPU_CONTROL_32BD_ENABLE | CPU_CONTROL_SYST_ENABLE
1532 | CPU_CONTROL_IC_ENABLE | CPU_CONTROL_DC_ENABLE
1533 | CPU_CONTROL_WBUF_ENABLE | CPU_CONTROL_LABT_ENABLE;
1534 cpuctrlmask = CPU_CONTROL_MMU_ENABLE | CPU_CONTROL_32BP_ENABLE
1535 | CPU_CONTROL_32BD_ENABLE | CPU_CONTROL_SYST_ENABLE
1536 | CPU_CONTROL_IC_ENABLE | CPU_CONTROL_DC_ENABLE
1537 | CPU_CONTROL_WBUF_ENABLE | CPU_CONTROL_ROM_ENABLE
1538 | CPU_CONTROL_BEND_ENABLE | CPU_CONTROL_AFLT_ENABLE
1539 | CPU_CONTROL_LABT_ENABLE | CPU_CONTROL_VECRELOC
1540 | CPU_CONTROL_ROUNDROBIN;
1541
1542 #ifndef ARM32_DISABLE_ALIGNMENT_FAULTS
1543 cpuctrl |= CPU_CONTROL_AFLT_ENABLE;
1544 #endif
1545
1546 cpuctrl = parse_cpu_options(args, arm9_options, cpuctrl);
1547
1548 #ifdef __ARMEB__
1549 cpuctrl |= CPU_CONTROL_BEND_ENABLE;
1550 #endif
1551 if (vector_page == ARM_VECTORS_HIGH)
1552 cpuctrl |= CPU_CONTROL_VECRELOC;
1553
1554 /* Clear out the cache */
1555 cpu_idcache_wbinv_all();
1556
1557 /* Set the control register */
1558 cpu_control(cpuctrlmask, cpuctrl);
1559 ctrl = cpuctrl;
1560
1561 }
1562 #endif /* CPU_ARM9 */
1563
1564 #ifdef CPU_ARM10
1565 struct cpu_option arm10_options[] = {
1566 { "cpu.cache", BIC, OR, (CPU_CONTROL_IC_ENABLE | CPU_CONTROL_DC_ENABLE) },
1567 { "cpu.nocache", OR, BIC, (CPU_CONTROL_IC_ENABLE | CPU_CONTROL_DC_ENABLE) },
1568 { "arm10.cache", BIC, OR, (CPU_CONTROL_IC_ENABLE | CPU_CONTROL_DC_ENABLE) },
1569 { "arm10.icache", BIC, OR, CPU_CONTROL_IC_ENABLE },
1570 { "arm10.dcache", BIC, OR, CPU_CONTROL_DC_ENABLE },
1571 { "cpu.writebuf", BIC, OR, CPU_CONTROL_WBUF_ENABLE },
1572 { "cpu.nowritebuf", OR, BIC, CPU_CONTROL_WBUF_ENABLE },
1573 { "arm10.writebuf", BIC, OR, CPU_CONTROL_WBUF_ENABLE },
1574 { NULL, IGN, IGN, 0 }
1575 };
1576
1577 void
1578 arm10_setup(args)
1579 char *args;
1580 {
1581 int cpuctrl, cpuctrlmask;
1582
1583 cpuctrl = CPU_CONTROL_MMU_ENABLE | CPU_CONTROL_SYST_ENABLE
1584 | CPU_CONTROL_IC_ENABLE | CPU_CONTROL_DC_ENABLE
1585 | CPU_CONTROL_WBUF_ENABLE | CPU_CONTROL_BPRD_ENABLE;
1586 cpuctrlmask = CPU_CONTROL_MMU_ENABLE | CPU_CONTROL_SYST_ENABLE
1587 | CPU_CONTROL_IC_ENABLE | CPU_CONTROL_DC_ENABLE
1588 | CPU_CONTROL_WBUF_ENABLE | CPU_CONTROL_ROM_ENABLE
1589 | CPU_CONTROL_BEND_ENABLE | CPU_CONTROL_AFLT_ENABLE
1590 | CPU_CONTROL_BPRD_ENABLE
1591 | CPU_CONTROL_ROUNDROBIN | CPU_CONTROL_CPCLK;
1592
1593 #ifndef ARM32_DISABLE_ALIGNMENT_FAULTS
1594 cpuctrl |= CPU_CONTROL_AFLT_ENABLE;
1595 #endif
1596
1597 cpuctrl = parse_cpu_options(args, arm10_options, cpuctrl);
1598
1599 #ifdef __ARMEB__
1600 cpuctrl |= CPU_CONTROL_BEND_ENABLE;
1601 #endif
1602
1603 /* Clear out the cache */
1604 cpu_idcache_wbinv_all();
1605
1606 	/* Now invalidate them both outright, to be really sure. */
1607 asm volatile ("mcr\tp15, 0, r0, c7, c7, 0" : : );
1608
1609 /* Set the control register */
1610 ctrl = cpuctrl;
1611 cpu_control(0xffffffff, cpuctrl);
1612
1613 /* And again. */
1614 cpu_idcache_wbinv_all();
1615 }
1616 #endif /* CPU_ARM10 */
1617
1618 #ifdef CPU_SA110
1619 struct cpu_option sa110_options[] = {
1620 #ifdef COMPAT_12
1621 { "nocache", IGN, BIC, (CPU_CONTROL_IC_ENABLE | CPU_CONTROL_DC_ENABLE) },
1622 { "nowritebuf", IGN, BIC, CPU_CONTROL_WBUF_ENABLE },
1623 #endif /* COMPAT_12 */
1624 { "cpu.cache", BIC, OR, (CPU_CONTROL_IC_ENABLE | CPU_CONTROL_DC_ENABLE) },
1625 { "cpu.nocache", OR, BIC, (CPU_CONTROL_IC_ENABLE | CPU_CONTROL_DC_ENABLE) },
1626 { "sa110.cache", BIC, OR, (CPU_CONTROL_IC_ENABLE | CPU_CONTROL_DC_ENABLE) },
1627 { "sa110.icache", BIC, OR, CPU_CONTROL_IC_ENABLE },
1628 { "sa110.dcache", BIC, OR, CPU_CONTROL_DC_ENABLE },
1629 { "cpu.writebuf", BIC, OR, CPU_CONTROL_WBUF_ENABLE },
1630 { "cpu.nowritebuf", OR, BIC, CPU_CONTROL_WBUF_ENABLE },
1631 { "sa110.writebuf", BIC, OR, CPU_CONTROL_WBUF_ENABLE },
1632 { NULL, IGN, IGN, 0 }
1633 };
1634
1635 void
1636 sa110_setup(args)
1637 char *args;
1638 {
1639 int cpuctrl, cpuctrlmask;
1640
1641 cpuctrl = CPU_CONTROL_MMU_ENABLE | CPU_CONTROL_32BP_ENABLE
1642 | CPU_CONTROL_32BD_ENABLE | CPU_CONTROL_SYST_ENABLE
1643 | CPU_CONTROL_IC_ENABLE | CPU_CONTROL_DC_ENABLE
1644 | CPU_CONTROL_WBUF_ENABLE;
1645 cpuctrlmask = CPU_CONTROL_MMU_ENABLE | CPU_CONTROL_32BP_ENABLE
1646 | CPU_CONTROL_32BD_ENABLE | CPU_CONTROL_SYST_ENABLE
1647 | CPU_CONTROL_IC_ENABLE | CPU_CONTROL_DC_ENABLE
1648 | CPU_CONTROL_WBUF_ENABLE | CPU_CONTROL_ROM_ENABLE
1649 | CPU_CONTROL_BEND_ENABLE | CPU_CONTROL_AFLT_ENABLE
1650 | CPU_CONTROL_LABT_ENABLE | CPU_CONTROL_BPRD_ENABLE
1651 | CPU_CONTROL_CPCLK;
1652
1653 #ifndef ARM32_DISABLE_ALIGNMENT_FAULTS
1654 cpuctrl |= CPU_CONTROL_AFLT_ENABLE;
1655 #endif
1656
1657 cpuctrl = parse_cpu_options(args, sa110_options, cpuctrl);
1658
1659 #ifdef __ARMEB__
1660 cpuctrl |= CPU_CONTROL_BEND_ENABLE;
1661 #endif
1662
1663 /* Clear out the cache */
1664 cpu_idcache_wbinv_all();
1665
1666 /* Set the control register */
1667 ctrl = cpuctrl;
1668 /* cpu_control(cpuctrlmask, cpuctrl);*/
1669 cpu_control(0xffffffff, cpuctrl);
1670
1671 /*
1672 	 * Enable clock switching.  Note that this doesn't read or write r0;
1673 	 * r0 is just there to make the asm valid.
1674 */
1675 __asm ("mcr 15, 0, r0, c15, c1, 2");
1676 }
1677 #endif /* CPU_SA110 */
1678
1679 #if defined(CPU_SA1100) || defined(CPU_SA1110)
1680 struct cpu_option sa11x0_options[] = {
1681 #ifdef COMPAT_12
1682 { "nocache", IGN, BIC, (CPU_CONTROL_IC_ENABLE | CPU_CONTROL_DC_ENABLE) },
1683 { "nowritebuf", IGN, BIC, CPU_CONTROL_WBUF_ENABLE },
1684 #endif /* COMPAT_12 */
1685 { "cpu.cache", BIC, OR, (CPU_CONTROL_IC_ENABLE | CPU_CONTROL_DC_ENABLE) },
1686 { "cpu.nocache", OR, BIC, (CPU_CONTROL_IC_ENABLE | CPU_CONTROL_DC_ENABLE) },
1687 { "sa11x0.cache", BIC, OR, (CPU_CONTROL_IC_ENABLE | CPU_CONTROL_DC_ENABLE) },
1688 { "sa11x0.icache", BIC, OR, CPU_CONTROL_IC_ENABLE },
1689 { "sa11x0.dcache", BIC, OR, CPU_CONTROL_DC_ENABLE },
1690 { "cpu.writebuf", BIC, OR, CPU_CONTROL_WBUF_ENABLE },
1691 { "cpu.nowritebuf", OR, BIC, CPU_CONTROL_WBUF_ENABLE },
1692 { "sa11x0.writebuf", BIC, OR, CPU_CONTROL_WBUF_ENABLE },
1693 { NULL, IGN, IGN, 0 }
1694 };
1695
1696 void
1697 sa11x0_setup(args)
1698 char *args;
1699 {
1700 int cpuctrl, cpuctrlmask;
1701
1702 cpuctrl = CPU_CONTROL_MMU_ENABLE | CPU_CONTROL_32BP_ENABLE
1703 | CPU_CONTROL_32BD_ENABLE | CPU_CONTROL_SYST_ENABLE
1704 | CPU_CONTROL_IC_ENABLE | CPU_CONTROL_DC_ENABLE
1705 | CPU_CONTROL_WBUF_ENABLE | CPU_CONTROL_LABT_ENABLE;
1706 cpuctrlmask = CPU_CONTROL_MMU_ENABLE | CPU_CONTROL_32BP_ENABLE
1707 | CPU_CONTROL_32BD_ENABLE | CPU_CONTROL_SYST_ENABLE
1708 | CPU_CONTROL_IC_ENABLE | CPU_CONTROL_DC_ENABLE
1709 | CPU_CONTROL_WBUF_ENABLE | CPU_CONTROL_ROM_ENABLE
1710 | CPU_CONTROL_BEND_ENABLE | CPU_CONTROL_AFLT_ENABLE
1711 | CPU_CONTROL_LABT_ENABLE | CPU_CONTROL_BPRD_ENABLE
1712 | CPU_CONTROL_CPCLK | CPU_CONTROL_VECRELOC;
1713
1714 #ifndef ARM32_DISABLE_ALIGNMENT_FAULTS
1715 cpuctrl |= CPU_CONTROL_AFLT_ENABLE;
1716 #endif
1717
1719 cpuctrl = parse_cpu_options(args, sa11x0_options, cpuctrl);
1720
1721 #ifdef __ARMEB__
1722 cpuctrl |= CPU_CONTROL_BEND_ENABLE;
1723 #endif
1724
1725 if (vector_page == ARM_VECTORS_HIGH)
1726 cpuctrl |= CPU_CONTROL_VECRELOC;
1727 /* Clear out the cache */
1728 cpu_idcache_wbinv_all();
1729 /* Set the control register */
1730 ctrl = cpuctrl;
1731 cpu_control(0xffffffff, cpuctrl);
1732 }
1733 #endif /* CPU_SA1100 || CPU_SA1110 */
1734
1735 #if defined(CPU_IXP12X0)
1736 struct cpu_option ixp12x0_options[] = {
1737 { "cpu.cache", BIC, OR, (CPU_CONTROL_IC_ENABLE | CPU_CONTROL_DC_ENABLE) },
1738 { "cpu.nocache", OR, BIC, (CPU_CONTROL_IC_ENABLE | CPU_CONTROL_DC_ENABLE) },
1739 { "ixp12x0.cache", BIC, OR, (CPU_CONTROL_IC_ENABLE | CPU_CONTROL_DC_ENABLE) },
1740 { "ixp12x0.icache", BIC, OR, CPU_CONTROL_IC_ENABLE },
1741 { "ixp12x0.dcache", BIC, OR, CPU_CONTROL_DC_ENABLE },
1742 { "cpu.writebuf", BIC, OR, CPU_CONTROL_WBUF_ENABLE },
1743 { "cpu.nowritebuf", OR, BIC, CPU_CONTROL_WBUF_ENABLE },
1744 { "ixp12x0.writebuf", BIC, OR, CPU_CONTROL_WBUF_ENABLE },
1745 { NULL, IGN, IGN, 0 }
1746 };
1747
1748 void
1749 ixp12x0_setup(args)
1750 char *args;
1751 {
1752 int cpuctrl, cpuctrlmask;
1753
1755 cpuctrl = CPU_CONTROL_MMU_ENABLE | CPU_CONTROL_DC_ENABLE
1756 | CPU_CONTROL_WBUF_ENABLE | CPU_CONTROL_SYST_ENABLE
1757 | CPU_CONTROL_IC_ENABLE;
1758
1759 cpuctrlmask = CPU_CONTROL_MMU_ENABLE | CPU_CONTROL_AFLT_ENABLE
1760 | CPU_CONTROL_DC_ENABLE | CPU_CONTROL_WBUF_ENABLE
1761 | CPU_CONTROL_BEND_ENABLE | CPU_CONTROL_SYST_ENABLE
1762 | CPU_CONTROL_ROM_ENABLE | CPU_CONTROL_IC_ENABLE
1763 | CPU_CONTROL_VECRELOC;
1764
1765 #ifndef ARM32_DISABLE_ALIGNMENT_FAULTS
1766 cpuctrl |= CPU_CONTROL_AFLT_ENABLE;
1767 #endif
1768
1769 cpuctrl = parse_cpu_options(args, ixp12x0_options, cpuctrl);
1770
1771 #ifdef __ARMEB__
1772 cpuctrl |= CPU_CONTROL_BEND_ENABLE;
1773 #endif
1774
1775 if (vector_page == ARM_VECTORS_HIGH)
1776 cpuctrl |= CPU_CONTROL_VECRELOC;
1777
1778 /* Clear out the cache */
1779 cpu_idcache_wbinv_all();
1780
1781 /* Set the control register */
1782 ctrl = cpuctrl;
1783 /* cpu_control(0xffffffff, cpuctrl); */
1784 cpu_control(cpuctrlmask, cpuctrl);
1785 }
1786 #endif /* CPU_IXP12X0 */
1787
1788 #if defined(CPU_XSCALE_80200) || defined(CPU_XSCALE_80321) || \
1789 defined(CPU_XSCALE_PXA2X0) || defined(CPU_XSCALE_IXP425)
1790 struct cpu_option xscale_options[] = {
1791 #ifdef COMPAT_12
1792 { "branchpredict", BIC, OR, CPU_CONTROL_BPRD_ENABLE },
1793 { "nocache", IGN, BIC, (CPU_CONTROL_IC_ENABLE | CPU_CONTROL_DC_ENABLE) },
1794 #endif /* COMPAT_12 */
1795 { "cpu.branchpredict", BIC, OR, CPU_CONTROL_BPRD_ENABLE },
1796 { "cpu.cache", BIC, OR, (CPU_CONTROL_IC_ENABLE | CPU_CONTROL_DC_ENABLE) },
1797 { "cpu.nocache", OR, BIC, (CPU_CONTROL_IC_ENABLE | CPU_CONTROL_DC_ENABLE) },
1798 { "xscale.branchpredict", BIC, OR, CPU_CONTROL_BPRD_ENABLE },
1799 { "xscale.cache", BIC, OR, (CPU_CONTROL_IC_ENABLE | CPU_CONTROL_DC_ENABLE) },
1800 { "xscale.icache", BIC, OR, CPU_CONTROL_IC_ENABLE },
1801 { "xscale.dcache", BIC, OR, CPU_CONTROL_DC_ENABLE },
1802 { NULL, IGN, IGN, 0 }
1803 };
1804
1805 void
1806 xscale_setup(args)
1807 char *args;
1808 {
1809 uint32_t auxctl;
1810 int cpuctrl, cpuctrlmask;
1811
1812 /*
1813 * The XScale Write Buffer is always enabled. Our option
1814 * is to enable/disable coalescing. Note that bits 6:3
1815 * must always be enabled.
1816 */
1817
1818 cpuctrl = CPU_CONTROL_MMU_ENABLE | CPU_CONTROL_32BP_ENABLE
1819 | CPU_CONTROL_32BD_ENABLE | CPU_CONTROL_SYST_ENABLE
1820 | CPU_CONTROL_IC_ENABLE | CPU_CONTROL_DC_ENABLE
1821 | CPU_CONTROL_WBUF_ENABLE | CPU_CONTROL_LABT_ENABLE
1822 | CPU_CONTROL_BPRD_ENABLE;
1823 cpuctrlmask = CPU_CONTROL_MMU_ENABLE | CPU_CONTROL_32BP_ENABLE
1824 | CPU_CONTROL_32BD_ENABLE | CPU_CONTROL_SYST_ENABLE
1825 | CPU_CONTROL_IC_ENABLE | CPU_CONTROL_DC_ENABLE
1826 | CPU_CONTROL_WBUF_ENABLE | CPU_CONTROL_ROM_ENABLE
1827 | CPU_CONTROL_BEND_ENABLE | CPU_CONTROL_AFLT_ENABLE
1828 | CPU_CONTROL_LABT_ENABLE | CPU_CONTROL_BPRD_ENABLE
1829 | CPU_CONTROL_CPCLK | CPU_CONTROL_VECRELOC;
1830
1831 #ifndef ARM32_DISABLE_ALIGNMENT_FAULTS
1832 cpuctrl |= CPU_CONTROL_AFLT_ENABLE;
1833 #endif
1834
1835 cpuctrl = parse_cpu_options(args, xscale_options, cpuctrl);
1836
1837 #ifdef __ARMEB__
1838 cpuctrl |= CPU_CONTROL_BEND_ENABLE;
1839 #endif
1840
1841 if (vector_page == ARM_VECTORS_HIGH)
1842 cpuctrl |= CPU_CONTROL_VECRELOC;
1843
1844 /* Clear out the cache */
1845 cpu_idcache_wbinv_all();
1846
1847 /*
1848 * Set the control register. Note that bits 6:3 must always
1849 * be set to 1.
1850 */
1851 ctrl = cpuctrl;
1852 /* cpu_control(cpuctrlmask, cpuctrl);*/
1853 cpu_control(0xffffffff, cpuctrl);
1854
1855 	/* Configure write-buffer coalescing */
1856 __asm __volatile("mrc p15, 0, %0, c1, c0, 1"
1857 : "=r" (auxctl));
1858 #ifdef XSCALE_NO_COALESCE_WRITES
1859 auxctl |= XSCALE_AUXCTL_K;
1860 #else
1861 auxctl &= ~XSCALE_AUXCTL_K;
1862 #endif
1863 __asm __volatile("mcr p15, 0, %0, c1, c0, 1"
1864 : : "r" (auxctl));
1865 }
1866 #endif /* CPU_XSCALE_80200 || CPU_XSCALE_80321 || CPU_XSCALE_PXA2X0 || CPU_XSCALE_IXP425 */