/*-
 * Copyright (C) 2007-2009 Semihalf, Rafal Jaworowski <raj@semihalf.com>
 * Copyright (C) 2006 Semihalf, Marian Balakowicz <m8@semihalf.com>
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN
 * NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED
 * TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
 * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
 * LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
 * NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
 * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 *
 * $FreeBSD: releng/10.1/sys/powerpc/booke/locore.S 266001 2014-05-14 03:09:37Z ian $
 */

#include "assym.s"

#include <machine/asm.h>
#include <machine/hid.h>
#include <machine/param.h>
#include <machine/spr.h>
#include <machine/pte.h>
#include <machine/trap.h>
#include <machine/vmparam.h>
#include <machine/tlb.h>

#define TMPSTACKSZ	16384

	.text
	.globl	btext
btext:

/*
 * This symbol is here for the benefit of kvm_mkdb, and is supposed to
 * mark the start of kernel text.
 */
	.globl	kernel_text
kernel_text:

/*
 * Startup entry. Note, this must be the first thing in the text segment!
 */
	.text
	.globl	__start
__start:

/*
 * Assumptions on the boot loader:
 *  - system memory starts from physical address 0
 *  - it's mapped by a single TLB1 entry
 *  - TLB1 mapping is 1:1 pa to va
 *  - kernel is loaded at 16MB boundary
 *  - all PID registers are set to the same value
 *  - CPU is running in AS=0
 *
 * Register contents provided by loader(8):
 *	r1	: stack pointer
 *	r3	: metadata pointer
 *
 * We rearrange the TLB1 layout as follows:
 *  - find the TLB1 entry we started in
 *  - make sure it's protected, invalidate other entries
 *  - create a temp entry in the second AS (make sure it's not TLB1[1])
 *  - switch to the temp mapping
 *  - map 64MB of RAM in TLB1[1]
 *  - use AS=1, set EPN to KERNBASE and RPN to the kernel load address
 *  - switch to the TLB1[1] mapping
 *  - invalidate the temp mapping
 *
 * locore register usage:
 *	r1	: stack pointer
 *	r2	: trace pointer (AP only, for early diagnostics)
 *	r3-r27	: scratch registers
 *	r28	: temp TLB1 entry
 *	r29	: initial TLB1 entry we started in
 *	r30-r31	: arguments (metadata pointer)
 */

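/*
 * In other words: we keep running on the loader's 1:1 entry while a
 * duplicate of it is created in AS=1; an rfi with MSR[IS|DS] set moves
 * us onto that duplicate so the original entry can be replaced; the
 * final AS=0 entry mapping KERNBASE to the physical load address is
 * then installed, and a second rfi lands us at the kernel's linked
 * virtual address; finally the temporary entry is dropped.
 */
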
/*
 * Keep arguments in r30 & r31 for later use.
 */
	mr	%r30, %r3
	mr	%r31, %r4

/*
 * Initial cleanup
 */
	li	%r3, PSL_DE	/* Keep debug exceptions for CodeWarrior. */
	mtmsr	%r3
	isync

	lis	%r3, HID0_E500_DEFAULT_SET@h
	ori	%r3, %r3, HID0_E500_DEFAULT_SET@l
	mtspr	SPR_HID0, %r3
	isync
	lis	%r3, HID1_E500_DEFAULT_SET@h
	ori	%r3, %r3, HID1_E500_DEFAULT_SET@l
	mtspr	SPR_HID1, %r3
	isync

	/* Invalidate all entries in TLB0 */
	li	%r3, 0
	bl	tlb_inval_all

	cmpwi	%r30, 0
	beq	done_mapping

/*
 * Locate the TLB1 entry that maps this code
 */
	bl	1f
1:	mflr	%r3
	bl	tlb1_find_current	/* the entry found is returned in r29 */

	bl	tlb1_inval_all_but_current

/*
 * Create temporary mapping in AS=1 and switch to it
 */
	addi	%r3, %r29, 1
	bl	tlb1_temp_mapping_as1

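	/*
	 * rfi loads the MSR from SRR1 and jumps to SRR0, so setting
	 * PSL_IS/PSL_DS in SRR1 switches translation to AS=1 atomically
	 * with the branch.  The addi of 20 makes SRR0 point just past
	 * the rfi: five 4-byte instructions from label 2.
	 */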
	mfmsr	%r3
	ori	%r3, %r3, (PSL_IS | PSL_DS)
	bl	2f
2:	mflr	%r4
	addi	%r4, %r4, 20
	mtspr	SPR_SRR0, %r4
	mtspr	SPR_SRR1, %r3
	rfi				/* Switch context */

/*
 * Invalidate initial entry
 */
	mr	%r3, %r29
	bl	tlb1_inval_entry

/*
 * Setup final mapping in TLB1[1] and switch to it
 */
	/* Final kernel mapping, map in 64 MB of RAM */
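	/*
	 * MAS0 picks the entry that tlbwe will write: TLBSEL1 selects
	 * the TLB1 array and the rlwimi below deposits the entry number
	 * into the ESEL field (mask 0x000f0000).
	 */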
	lis	%r3, MAS0_TLBSEL1@h	/* Select TLB1 */
	li	%r4, 0			/* Entry 0 */
	rlwimi	%r3, %r4, 16, 12, 15
	mtspr	SPR_MAS0, %r3
	isync

	li	%r3, (TLB_SIZE_64M << MAS1_TSIZE_SHIFT)@l
	oris	%r3, %r3, (MAS1_VALID | MAS1_IPROT)@h
	mtspr	SPR_MAS1, %r3		/* note TS was not filled, so it's TS=0 */
	isync

	lis	%r3, KERNBASE@h
	ori	%r3, %r3, KERNBASE@l	/* EPN = KERNBASE */
#ifdef SMP
	ori	%r3, %r3, MAS2_M@l	/* WIMGE = 0b00100 */
#endif
	mtspr	SPR_MAS2, %r3
	isync

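	/*
	 * Recover the physical load address PC-relatively: bl/mflr reads
	 * the current physical PC (we still run on a 1:1 mapping), and
	 * the rlwinm keeps only the top 8 bits, rounding down to the
	 * 16MB boundary the kernel was loaded at.
	 */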
	/* Discover phys load address */
	bl	3f
3:	mflr	%r4			/* Use current address */
	rlwinm	%r4, %r4, 0, 0, 7	/* 16MB alignment mask */
	ori	%r4, %r4, (MAS3_SX | MAS3_SW | MAS3_SR)@l
	mtspr	SPR_MAS3, %r4		/* Set RPN and protection */
	isync
	tlbwe
	isync
	msync

	/* Switch to the above TLB1[1] mapping */
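	/*
	 * Compute the virtual resume address: the low 24 bits of the PC
	 * give the offset into the load image, r3 (MAS2 with its low
	 * flag bits masked off) supplies the KERNBASE EPN, and 36 =
	 * 9 * 4 bytes skips the nine instructions up to and including
	 * the rfi.
	 */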
	bl	4f
4:	mflr	%r4
	rlwinm	%r4, %r4, 0, 8, 31	/* Current offset from kernel load address */
	rlwinm	%r3, %r3, 0, 0, 19
	add	%r4, %r4, %r3		/* Convert to kernel virtual address */
	addi	%r4, %r4, 36
	li	%r3, PSL_DE		/* Note AS=0 */
	mtspr	SPR_SRR0, %r4
	mtspr	SPR_SRR1, %r3
	rfi

/*
 * Invalidate temp mapping
 */
	mr	%r3, %r28
	bl	tlb1_inval_entry

done_mapping:

/*
 * Setup a temporary stack
 */
	lis	%r1, tmpstack@ha
	addi	%r1, %r1, tmpstack@l
	addi	%r1, %r1, (TMPSTACKSZ - 8)

/*
 * Initialise exception vector offsets
 */
	bl	ivor_setup

/*
 * Set up arguments and jump to system initialization code
 */
	mr	%r3, %r30
	mr	%r4, %r31

	/* Prepare core */
	bl	booke_init

	/* Switch to thread0.td_kstack now */
	mr	%r1, %r3
	li	%r3, 0
	stw	%r3, 0(%r1)

	/* Machine independent part, does not return */
	bl	mi_startup
	/* NOT REACHED */
5:	b	5b


#ifdef SMP
/************************************************************************/
/* AP Boot page */
/************************************************************************/
	.text
	.globl	__boot_page
	.align	12
__boot_page:
	bl	1f

	.globl	bp_ntlb1s
bp_ntlb1s:
	.long	0

	.globl	bp_tlb1
bp_tlb1:
	.space	4 * 3 * 16

	.globl	bp_tlb1_end
bp_tlb1_end:
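
	/*
	 * This scratch area is filled in by the BSP before the AP is
	 * released from holdoff: bp_ntlb1s holds the number of valid
	 * TLB1 entries, and bp_tlb1 up to 16 three-word records (the
	 * MAS1, MAS2 and MAS3 values of those entries), which the loop
	 * at label 4 below replays into this CPU's TLB1.
	 */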

/*
 * Initial configuration
 */
1:	mflr	%r31		/* r31 holds the address of bp_ntlb1s */

	/* Set HIDs */
	lis	%r3, HID0_E500_DEFAULT_SET@h
	ori	%r3, %r3, HID0_E500_DEFAULT_SET@l
	mtspr	SPR_HID0, %r3
	isync
	lis	%r3, HID1_E500_DEFAULT_SET@h
	ori	%r3, %r3, HID1_E500_DEFAULT_SET@l
	mtspr	SPR_HID1, %r3
	isync

	/* Enable branch prediction */
	li	%r3, BUCSR_BPEN
	mtspr	SPR_BUCSR, %r3
	isync

	/* Invalidate all entries in TLB0 */
	li	%r3, 0
	bl	tlb_inval_all

/*
 * Find TLB1 entry which is translating us now
 */
	bl	2f
2:	mflr	%r3
	bl	tlb1_find_current	/* the entry number found is in r29 */

	bl	tlb1_inval_all_but_current

/*
 * Create temporary translation in AS=1 and switch to it
 */
	lwz	%r3, 0(%r31)		/* temp entry = bp_ntlb1s, first free slot */
	bl	tlb1_temp_mapping_as1

	mfmsr	%r3
	ori	%r3, %r3, (PSL_IS | PSL_DS)
	bl	3f
3:	mflr	%r4
	addi	%r4, %r4, 20		/* five instructions: resume just past the rfi */
	mtspr	SPR_SRR0, %r4
	mtspr	SPR_SRR1, %r3
	rfi				/* Switch context */

/*
 * Invalidate initial entry
 */
	mr	%r3, %r29
	bl	tlb1_inval_entry

/*
 * Setup final mapping in TLB1[1] and switch to it
 */
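	/*
	 * Replay the BSP-prepared TLB1 entries: r6 = entry count from
	 * bp_ntlb1s, r5 walks the three-word MAS1/MAS2/MAS3 records in
	 * bp_tlb1, and r4 is the destination ESEL, starting at 0.
	 */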
	lwz	%r6, 0(%r31)
	addi	%r5, %r31, 4
	li	%r4, 0

4:	lis	%r3, MAS0_TLBSEL1@h
	rlwimi	%r3, %r4, 16, 12, 15
	mtspr	SPR_MAS0, %r3
	isync
	lwz	%r3, 0(%r5)
	mtspr	SPR_MAS1, %r3
	isync
	lwz	%r3, 4(%r5)
	mtspr	SPR_MAS2, %r3
	isync
	lwz	%r3, 8(%r5)
	mtspr	SPR_MAS3, %r3
	isync
	tlbwe
	isync
	msync
	addi	%r5, %r5, 12
	addi	%r4, %r4, 1
	cmpw	%r4, %r6
	blt	4b

	/* Switch to the final mapping */
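	/*
	 * Resume at this code's linked (virtual) address: rlwinm keeps
	 * the offset within the 4KB boot page (mask 0xfff), the add
	 * rebases it onto __boot_page, and 32 = 8 * 4 bytes skips the
	 * eight instructions up to and including the rfi.
	 */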
	lis	%r5, __boot_page@ha
	ori	%r5, %r5, __boot_page@l
	bl	5f
5:	mflr	%r3
	rlwinm	%r3, %r3, 0, 0xfff	/* Offset from boot page start */
	add	%r3, %r3, %r5		/* Make this virtual address */
	addi	%r3, %r3, 32
	li	%r4, 0			/* Note AS=0 */
	mtspr	SPR_SRR0, %r3
	mtspr	SPR_SRR1, %r4
	rfi

/*
 * At this point we're running at virtual addresses KERNBASE and beyond, so
 * we can directly access all locations the kernel was linked against.
 */

/*
 * Invalidate temp mapping
 */
	mr	%r3, %r28
	bl	tlb1_inval_entry

/*
 * Setup a temporary stack
 */
	lis	%r1, tmpstack@ha
	addi	%r1, %r1, tmpstack@l
	addi	%r1, %r1, (TMPSTACKSZ - 8)

/*
 * Initialise exception vector offsets
 */
	bl	ivor_setup

/*
 * Assign our pcpu instance
 */
	lis	%r3, ap_pcpu@h
	ori	%r3, %r3, ap_pcpu@l
	lwz	%r3, 0(%r3)
	mtsprg0	%r3

	bl	pmap_bootstrap_ap

	bl	cpudep_ap_bootstrap
	/* Switch to the idle thread's kstack */
	mr	%r1, %r3

	bl	machdep_ap_bootstrap

	/* NOT REACHED */
6:	b	6b
#endif /* SMP */

/*
 * Invalidate all entries in the given TLB.
 *
 * r3	TLBSEL
 */
tlb_inval_all:
	rlwinm	%r3, %r3, 3, 0x18	/* TLBSEL */
	ori	%r3, %r3, 0x4		/* INVALL */
	tlbivax	0, %r3			/* the EA operand carries TLBSEL/INVALL */
	isync
	msync

	tlbsync
	msync
	blr

/*
 * expects address to look up in r3, returns entry number in r29
 *
 * FIXME: the hidden assumption is we are now running in AS=0, but we should
 * retrieve actual AS from MSR[IS|DS] and put it in MAS6[SAS]
 */
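/*
 * tlbsx searches for the effective address in r3 using the SPID (and
 * SAS) values programmed into MAS6; on a hit it loads MAS0-MAS3 from
 * the matching entry, so MAS0[ESEL] identifies the entry and the tlbwe
 * below writes the same entry back with IPROT added.
 */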
tlb1_find_current:
	mfspr	%r17, SPR_PID0
	slwi	%r17, %r17, MAS6_SPID0_SHIFT
	mtspr	SPR_MAS6, %r17
	isync
	tlbsx	0, %r3
	mfspr	%r17, SPR_MAS0
	rlwinm	%r29, %r17, 16, 20, 31	/* MAS0[ESEL] -> r29 */

	/* Make sure we have IPROT set on the entry */
	mfspr	%r17, SPR_MAS1
	oris	%r17, %r17, MAS1_IPROT@h
	mtspr	SPR_MAS1, %r17
	isync
	tlbwe
	isync
	msync
	blr

/*
 * Invalidates a single entry in TLB1.
 *
 * r3		ESEL
 * r4-r5	scratched
 */
tlb1_inval_entry:
	lis	%r4, MAS0_TLBSEL1@h	/* Select TLB1 */
	rlwimi	%r4, %r3, 16, 12, 15	/* Select our entry */
	mtspr	SPR_MAS0, %r4
	isync
	tlbre
	li	%r5, 0			/* MAS1[V] = 0 */
	mtspr	SPR_MAS1, %r5
	isync
	tlbwe
	isync
	msync
	blr

/*
 * r3		entry of temp translation
 * r29		entry of current translation
 * r28		returns temp entry passed in r3
 * r4-r5	scratched
 */
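/*
 * The temp entry is a copy of the current translation (read back via
 * tlbre) with TS=1 and TID=0, i.e. a global mapping that is only live
 * while MSR[IS|DS] are set, plus IPROT so it survives invalidations.
 */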
tlb1_temp_mapping_as1:
	mr	%r28, %r3

	/* Read our current translation */
	lis	%r3, MAS0_TLBSEL1@h	/* Select TLB1 */
	rlwimi	%r3, %r29, 16, 12, 15	/* Select our current entry */
	mtspr	SPR_MAS0, %r3
	isync
	tlbre

	/* Prepare and write temp entry */
	lis	%r3, MAS0_TLBSEL1@h	/* Select TLB1 */
	rlwimi	%r3, %r28, 16, 12, 15	/* Select temp entry */
	mtspr	SPR_MAS0, %r3
	isync
	mfspr	%r5, SPR_MAS1
	li	%r4, 1			/* AS=1 */
	rlwimi	%r5, %r4, 12, 19, 19	/* set MAS1[TS] */
	li	%r4, 0			/* Global mapping, TID=0 */
	rlwimi	%r5, %r4, 16, 8, 15	/* clear MAS1[TID] */
	oris	%r5, %r5, (MAS1_VALID | MAS1_IPROT)@h
	mtspr	SPR_MAS1, %r5
	isync
	tlbwe
	isync
	msync
	blr

/*
 * Loops over TLB1, invalidates all entries skipping the one which currently
 * maps this code.
 *
 * r29		current entry
 * r3-r5	scratched
 */
tlb1_inval_all_but_current:
	mr	%r6, %r3
	mfspr	%r3, SPR_TLB1CFG	/* Get number of entries */
	andi.	%r3, %r3, TLBCFG_NENTRY_MASK@l
	li	%r4, 0			/* Start from Entry 0 */
1:	lis	%r5, MAS0_TLBSEL1@h
	rlwimi	%r5, %r4, 16, 12, 15
	mtspr	SPR_MAS0, %r5
	isync
	tlbre
	mfspr	%r5, SPR_MAS1
	cmpw	%r4, %r29		/* our current entry? */
	beq	2f
	rlwinm	%r5, %r5, 0, 2, 31	/* clear VALID and IPROT bits */
	mtspr	SPR_MAS1, %r5
	isync
	tlbwe
	isync
	msync
2:	addi	%r4, %r4, 1
	cmpw	%r4, %r3		/* Check if this is the last entry */
	bne	1b
	blr

#ifdef SMP
__boot_page_padding:
	/*
	 * Boot page needs to be exactly 4K, with the last word of this page
	 * acting as the reset vector, so we need to stuff the remainder.
	 * Upon release from holdoff the CPU fetches the last word of the
	 * boot page.
	 */
	.space	4092 - (__boot_page_padding - __boot_page)
	b	__boot_page
#endif /* SMP */

/************************************************************************/
/* locore subroutines */
/************************************************************************/

ivor_setup:
	/* Set base address of interrupt handler routines */
	lis	%r3, interrupt_vector_base@h
	mtspr	SPR_IVPR, %r3

	/* Assign interrupt handler routines offsets */
	li	%r3, int_critical_input@l
	mtspr	SPR_IVOR0, %r3
	li	%r3, int_machine_check@l
	mtspr	SPR_IVOR1, %r3
	li	%r3, int_data_storage@l
	mtspr	SPR_IVOR2, %r3
	li	%r3, int_instr_storage@l
	mtspr	SPR_IVOR3, %r3
	li	%r3, int_external_input@l
	mtspr	SPR_IVOR4, %r3
	li	%r3, int_alignment@l
	mtspr	SPR_IVOR5, %r3
	li	%r3, int_program@l
	mtspr	SPR_IVOR6, %r3
	li	%r3, int_syscall@l
	mtspr	SPR_IVOR8, %r3
	li	%r3, int_decrementer@l
	mtspr	SPR_IVOR10, %r3
	li	%r3, int_fixed_interval_timer@l
	mtspr	SPR_IVOR11, %r3
	li	%r3, int_watchdog@l
	mtspr	SPR_IVOR12, %r3
	li	%r3, int_data_tlb_error@l
	mtspr	SPR_IVOR13, %r3
	li	%r3, int_inst_tlb_error@l
	mtspr	SPR_IVOR14, %r3
	li	%r3, int_debug@l
	mtspr	SPR_IVOR15, %r3
	blr

/*
 * void tid_flush(tlbtid_t tid);
 *
 * Invalidate all TLB0 entries which match the given TID. Note this is
 * dedicated to cases where invalidations should NOT be propagated to other
 * CPUs.
 *
 * Global vars tlb0_ways, tlb0_entries_per_way are assumed to have been set up
 * correctly (by tlb0_get_tlbconf()).
 */
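/*
 * TLB0 is set-associative: MAS0[ESEL] selects the way and MAS2[EPN]
 * the set, so the two nested loops below visit every (way, entry)
 * combination, read each entry with tlbre, and clear MAS1[V] on those
 * whose TID matches.
 */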
ENTRY(tid_flush)
	cmpwi	%r3, TID_KERNEL
	beq	tid_flush_end	/* don't evict kernel translations */

	/* Number of TLB0 ways */
	lis	%r4, tlb0_ways@h
	ori	%r4, %r4, tlb0_ways@l
	lwz	%r4, 0(%r4)

	/* Number of entries / way */
	lis	%r5, tlb0_entries_per_way@h
	ori	%r5, %r5, tlb0_entries_per_way@l
	lwz	%r5, 0(%r5)

	/* Disable interrupts */
	mfmsr	%r10
	wrteei	0

	li	%r6, 0		/* ways counter */
loop_ways:
	li	%r7, 0		/* entries [per way] counter */
loop_entries:
	/* Select TLB0 and ESEL (way) */
	lis	%r8, MAS0_TLBSEL0@h
	rlwimi	%r8, %r6, 16, 14, 15
	mtspr	SPR_MAS0, %r8
	isync

	/* Select EPN (entry within the way) */
	rlwinm	%r8, %r7, 12, 13, 19
	mtspr	SPR_MAS2, %r8
	isync
	tlbre

	/* Check if valid entry */
	mfspr	%r8, SPR_MAS1
	andis.	%r9, %r8, MAS1_VALID@h
	beq	next_entry	/* invalid entry */

	/* Check if this is our TID */
	rlwinm	%r9, %r8, 16, 24, 31	/* MAS1[TID] -> r9 */

	cmplw	%r9, %r3
	bne	next_entry	/* not our TID */

	/* Clear VALID bit */
	rlwinm	%r8, %r8, 0, 1, 31
	mtspr	SPR_MAS1, %r8
	isync
	tlbwe
	isync
	msync

next_entry:
	addi	%r7, %r7, 1
	cmpw	%r7, %r5
	bne	loop_entries

	/* Next way */
	addi	%r6, %r6, 1
	cmpw	%r6, %r4
	bne	loop_ways

	/* Restore MSR (possibly re-enable interrupts) */
	mtmsr	%r10
	isync

tid_flush_end:
	blr

/*
 * Cache disable/enable/inval sequences according
 * to section 2.16 of E500CORE RM.
 */
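/*
 * The flash-invalidate bits (L1CSR0[DCFI], L1CSR1[ICFI]) are cleared by
 * hardware once the invalidation completes; the polling loops in the
 * inval routines below wait for exactly that.
 */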
ENTRY(dcache_inval)
	/* Invalidate d-cache */
	mfspr	%r3, SPR_L1CSR0
	ori	%r3, %r3, (L1CSR0_DCFI | L1CSR0_DCLFR)@l
	msync
	isync
	mtspr	SPR_L1CSR0, %r3
	isync
1:	mfspr	%r3, SPR_L1CSR0
	andi.	%r3, %r3, L1CSR0_DCFI
	bne	1b
	blr

ENTRY(dcache_disable)
	/* Disable d-cache */
	mfspr	%r3, SPR_L1CSR0
	li	%r4, L1CSR0_DCE@l
	not	%r4, %r4
	and	%r3, %r3, %r4
	msync
	isync
	mtspr	SPR_L1CSR0, %r3
	isync
	blr

ENTRY(dcache_enable)
	/* Enable d-cache */
	mfspr	%r3, SPR_L1CSR0
	oris	%r3, %r3, (L1CSR0_DCPE | L1CSR0_DCE)@h
	ori	%r3, %r3, (L1CSR0_DCPE | L1CSR0_DCE)@l
	msync
	isync
	mtspr	SPR_L1CSR0, %r3
	isync
	blr

ENTRY(icache_inval)
	/* Invalidate i-cache */
	mfspr	%r3, SPR_L1CSR1
	ori	%r3, %r3, (L1CSR1_ICFI | L1CSR1_ICLFR)@l
	isync
	mtspr	SPR_L1CSR1, %r3
	isync
1:	mfspr	%r3, SPR_L1CSR1
	andi.	%r3, %r3, L1CSR1_ICFI
	bne	1b
	blr

ENTRY(icache_disable)
	/* Disable i-cache */
	mfspr	%r3, SPR_L1CSR1
	li	%r4, L1CSR1_ICE@l
	not	%r4, %r4
	and	%r3, %r3, %r4
	isync
	mtspr	SPR_L1CSR1, %r3
	isync
	blr

ENTRY(icache_enable)
	/* Enable i-cache */
	mfspr	%r3, SPR_L1CSR1
	oris	%r3, %r3, (L1CSR1_ICPE | L1CSR1_ICE)@h
	ori	%r3, %r3, (L1CSR1_ICPE | L1CSR1_ICE)@l
	isync
	mtspr	SPR_L1CSR1, %r3
	isync
	blr

/*
 * int setfault()
 *
 * Similar to setjmp; sets up for handling faults on accesses to user memory.
 * Any routine using this may only call bcopy, either the form below or the
 * (currently used) optimized C code, since neither uses any non-volatile
 * registers.
 */
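/*
 * Buffer layout written below: LR at offset 0, r1 at 4, r2 at 8, then
 * CR, CTR, XER and r13-r31 via the stmw starting at offset 12.  On a
 * fault the trap code uses the pointer stored in PCB_ONFAULT to unwind
 * setjmp-style, so setfault appears to return a second time, non-zero.
 */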
	.globl	setfault
setfault:
	mflr	%r0
	mfsprg0	%r4
	lwz	%r4, TD_PCB(%r2)
	stw	%r3, PCB_ONFAULT(%r4)
	mfcr	%r10
	mfctr	%r11
	mfxer	%r12
	stw	%r0, 0(%r3)
	stw	%r1, 4(%r3)
	stw	%r2, 8(%r3)
	stmw	%r10, 12(%r3)	/* store CR, CTR, XER, [r13 .. r31] */
	li	%r3, 0		/* return FALSE */
	blr

/************************************************************************/
/* Data section */
/************************************************************************/
	.data
	.align	4
tmpstack:
	.space	TMPSTACKSZ

/*
 * Compiled KERNBASE locations
 */
	.globl	kernbase
	.set	kernbase, KERNBASE

/*
 * Globals
 */
#define INTRCNT_COUNT	256	/* max(HROWPIC_IRQMAX,OPENPIC_IRQMAX) */

GLOBAL(intrnames)
	.space	INTRCNT_COUNT * (MAXCOMLEN + 1) * 2
GLOBAL(sintrnames)
	.long	INTRCNT_COUNT * (MAXCOMLEN + 1) * 2

	.align	4
GLOBAL(intrcnt)
	.space	INTRCNT_COUNT * 4 * 2
GLOBAL(sintrcnt)
	.long	INTRCNT_COUNT * 4 * 2

#include <powerpc/booke/trap_subr.S>