1 /* $FreeBSD$ */
2 /* $NetBSD: trap_subr.S,v 1.20 2002/04/22 23:20:08 kleink Exp $ */
3
4 /*-
5 * Copyright (C) 1995, 1996 Wolfgang Solfrank.
6 * Copyright (C) 1995, 1996 TooLs GmbH.
7 * All rights reserved.
8 *
9 * Redistribution and use in source and binary forms, with or without
10 * modification, are permitted provided that the following conditions
11 * are met:
12 * 1. Redistributions of source code must retain the above copyright
13 * notice, this list of conditions and the following disclaimer.
14 * 2. Redistributions in binary form must reproduce the above copyright
15 * notice, this list of conditions and the following disclaimer in the
16 * documentation and/or other materials provided with the distribution.
17 * 3. All advertising materials mentioning features or use of this software
18 * must display the following acknowledgement:
19 * This product includes software developed by TooLs GmbH.
20 * 4. The name of TooLs GmbH may not be used to endorse or promote products
21 * derived from this software without specific prior written permission.
22 *
23 * THIS SOFTWARE IS PROVIDED BY TOOLS GMBH ``AS IS'' AND ANY EXPRESS OR
24 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
25 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
26 * IN NO EVENT SHALL TOOLS GMBH BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
27 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
28 * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS;
29 * OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,
30 * WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR
31 * OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF
32 * ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
33 */
34
/*
 * NOTICE: This is not a standalone file.  To use it, #include it in
 * your port's locore.S, like so:
 *
 *	#include <powerpc/aim/trap_subr.S>
 */
41
/* Locate the per-CPU data structure: the PCPU pointer is kept in SPRG0 */
#define GET_CPUINFO(r)  \
	mfsprg0	r
/*
 * Load the kernel TOC pointer into r.  The TOC address is stored at a
 * fixed "magic" location (TRAP_TOCBASE) in the direct map so that it can
 * be found even from real-mode trap handlers.  Clobbers only r.
 */
#define GET_TOCBASE(r)  \
	lis	r,DMAP_BASE_ADDRESS@highesta;	/* To real-mode alias/dmap */ \
	sldi	r,r,32;							\
	ori	r,r,TRAP_TOCBASE;	/* Magic address for TOC */	\
	ld	r,0(r)
50
/*
 * Restore SRs (SLB entries) for a pmap
 *
 * Requires that r28-r31 be scratch, with r28 initialized to the SLB cache
 */

/*
 * User SRs are loaded through a pointer to the current pmap.
 * PCPU already in %r3.
 *
 * Walks the per-pmap user SLB cache (a NULL-terminated array of pointers
 * to SLBV/SLBE pairs) and installs each entry into consecutive SLB slots.
 * Returns via blr; clobbers r28-r31 and replaces the SLB contents.
 */
restore_usersrs:
	ld	%r28,PC_USERSLB(%r3)
	cmpdi	%r28, 0			/* If user SLB pointer NULL, exit */
	beqlr

	li	%r29, 0			/* Set the counter to zero */

	/*
	 * Wipe the SLB; entry 0 is read back and explicitly slbie'd
	 * (slbia alone does not fully invalidate slot 0's translation).
	 */
	slbia
	slbmfee	%r31,%r29
	clrrdi	%r31,%r31,28
	slbie	%r31
1:	ld	%r31, 0(%r28)		/* Load SLB entry pointer */
	cmpdi	%r31, 0			/* If NULL, stop */
	beqlr

	ld	%r30, 0(%r31)		/* Load SLBV */
	ld	%r31, 8(%r31)		/* Load SLBE */
	or	%r31, %r31, %r29	/* Set SLBE slot */
	slbmte	%r30, %r31		/* Install SLB entry */

	addi	%r28, %r28, 8		/* Advance pointer */
	addi	%r29, %r29, 1
	b	1b			/* Repeat */
84
/*
 * Kernel SRs (SLB entries) are loaded directly from the PCPU fields.
 * PCPU in %r1.
 *
 * Installs the per-CPU kernel SLB cache (an array of SLBV/SLBE pairs at
 * PC_KERNSLB) into the SLB, skipping the slot reserved for the user
 * segment (USER_SLB_SLOT).  Returns via blr; clobbers r28-r31, CR, and
 * replaces the SLB contents.
 */
restore_kernsrs:
	lwz	%r29, PC_FLAGS(%r1)
	mtcr	%r29
	btlr	0			/* PC_FLAGS bit 0 set: nothing to do
					 * (presumably SLBs unused, e.g. radix
					 * MMU — confirm vs. pcpu flag defs) */
	addi	%r28,%r1,PC_KERNSLB
	ld	%r29,16(%r28)		/* One past USER_SLB_SLOT */
	cmpdi	%r29,0
	beqlr				/* If first kernel entry is invalid,
					 * SLBs not in use, so exit early */

	/* Otherwise, set up SLBs */
	li	%r29, 0			/* Set the counter to zero */

	/*
	 * Wipe the SLB; entry 0 is read back and explicitly slbie'd
	 * (slbia alone does not fully invalidate slot 0's translation).
	 */
	slbia
	slbmfee	%r31,%r29
	clrrdi	%r31,%r31,28
	slbie	%r31
1:	cmpdi	%r29, USER_SLB_SLOT	/* Skip the user slot */
	beq-	2f

	ld	%r31, 8(%r28)		/* Load SLBE */
	cmpdi	%r31, 0			/* If SLBE is not valid, stop */
	beqlr
	ld	%r30, 0(%r28)		/* Load SLBV */
	slbmte	%r30, %r31		/* Install SLB entry */

2:	addi	%r28, %r28, 16		/* Advance pointer (16-byte pairs) */
	addi	%r29, %r29, 1
	cmpdi	%r29, 64		/* Repeat if we are not at the end */
	blt	1b
	blr
120
/*
 * FRAME_SETUP: build a trapframe on the kernel stack.
 *
 * Turns address translation back on, then saves all GPRs, LR, CR, XER,
 * CTR, the trap vector (from SPRG3), SRR0/SRR1, and (for DSI traps)
 * DAR/DSISR into the trapframe at 48(%r1).  On exit %r2 holds the PCPU
 * pointer and %r13 the current thread, per kernel convention.
 *
 * Assumes:
 *	SPRG1		SP (1)
 *	SPRG3		trap type
 *	savearea	r27-r31,DAR,DSISR (DAR & DSISR only for DSI traps)
 *	r28		LR
 *	r29		CR
 *	r30		scratch
 *	r31		scratch
 *	r1		kernel stack
 *	SRR0/1		as at start of trap
 *
 * NOTE: SPRG1 is never used while the MMU is on, making it safe to reuse
 * in any real-mode fault handler, including those handling double faults.
 */
#define	FRAME_SETUP(savearea)						\
/* Have to enable translation to allow access of kernel stack: */	\
	GET_CPUINFO(%r31);						\
	mfsrr0	%r30;							\
	std	%r30,(savearea+CPUSAVE_SRR0)(%r31);	/* save SRR0 */	\
	mfsrr1	%r30;							\
	std	%r30,(savearea+CPUSAVE_SRR1)(%r31);	/* save SRR1 */	\
	mfsprg1	%r31;			/* get saved SP (clears SPRG1) */ \
	mfmsr	%r30;							\
	ori	%r30,%r30,(PSL_DR|PSL_IR|PSL_RI)@l; /* relocation on */	\
	mtmsr	%r30;			/* stack can now be accessed */	\
	isync;								\
	stdu	%r31,-(FRAMELEN+288)(%r1); /* save it in the callframe */ \
	std	%r0, FRAME_0+48(%r1);	/* save r0 in the trapframe */	\
	std	%r31,FRAME_1+48(%r1);	/* save SP   "  "  " */		\
	std	%r2, FRAME_2+48(%r1);	/* save r2   "  "  " */		\
	std	%r28,FRAME_LR+48(%r1);	/* save LR   "  "  " */		\
	std	%r29,FRAME_CR+48(%r1);	/* save CR   "  "  " */		\
	GET_CPUINFO(%r2);						\
	ld	%r27,(savearea+CPUSAVE_R27)(%r2); /* get saved r27 */	\
	ld	%r28,(savearea+CPUSAVE_R28)(%r2); /* get saved r28 */	\
	ld	%r29,(savearea+CPUSAVE_R29)(%r2); /* get saved r29 */	\
	ld	%r30,(savearea+CPUSAVE_R30)(%r2); /* get saved r30 */	\
	ld	%r31,(savearea+CPUSAVE_R31)(%r2); /* get saved r31 */	\
	std	%r3, FRAME_3+48(%r1);	/* save r3-r31 */		\
	std	%r4, FRAME_4+48(%r1);					\
	std	%r5, FRAME_5+48(%r1);					\
	std	%r6, FRAME_6+48(%r1);					\
	std	%r7, FRAME_7+48(%r1);					\
	std	%r8, FRAME_8+48(%r1);					\
	std	%r9, FRAME_9+48(%r1);					\
	std	%r10, FRAME_10+48(%r1);					\
	std	%r11, FRAME_11+48(%r1);					\
	std	%r12, FRAME_12+48(%r1);					\
	std	%r13, FRAME_13+48(%r1);					\
	std	%r14, FRAME_14+48(%r1);					\
	std	%r15, FRAME_15+48(%r1);					\
	std	%r16, FRAME_16+48(%r1);					\
	std	%r17, FRAME_17+48(%r1);					\
	std	%r18, FRAME_18+48(%r1);					\
	std	%r19, FRAME_19+48(%r1);					\
	std	%r20, FRAME_20+48(%r1);					\
	std	%r21, FRAME_21+48(%r1);					\
	std	%r22, FRAME_22+48(%r1);					\
	std	%r23, FRAME_23+48(%r1);					\
	std	%r24, FRAME_24+48(%r1);					\
	std	%r25, FRAME_25+48(%r1);					\
	std	%r26, FRAME_26+48(%r1);					\
	std	%r27, FRAME_27+48(%r1);					\
	std	%r28, FRAME_28+48(%r1);					\
	std	%r29, FRAME_29+48(%r1);					\
	std	%r30, FRAME_30+48(%r1);					\
	std	%r31, FRAME_31+48(%r1);					\
	ld	%r28,(savearea+CPUSAVE_AIM_DAR)(%r2);  /* saved DAR */	\
	ld	%r29,(savearea+CPUSAVE_AIM_DSISR)(%r2);/* saved DSISR */\
	ld	%r30,(savearea+CPUSAVE_SRR0)(%r2);	/* saved SRR0 */ \
	ld	%r31,(savearea+CPUSAVE_SRR1)(%r2);	/* saved SRR1 */ \
	mfxer	%r3;							\
	mfctr	%r4;							\
	mfsprg3	%r5;			/* trap vector stashed here */	\
	std	%r3, FRAME_XER+48(1);	/* save xer/ctr/exc */		\
	std	%r4, FRAME_CTR+48(1);					\
	std	%r5, FRAME_EXC+48(1);					\
	std	%r28,FRAME_AIM_DAR+48(1);				\
	std	%r29,FRAME_AIM_DSISR+48(1); /* save dsisr/srr0/srr1 */	\
	std	%r30,FRAME_SRR0+48(1);					\
	std	%r31,FRAME_SRR1+48(1);					\
	ld	%r13,PC_CURTHREAD(%r2)	/* set kernel curthread */
204
/*
 * FRAME_LEAVE: restore machine state from the trapframe at 48(%r1) and
 * set up for the final rfid.
 *
 * Disables external interrupts, restores CTR/XER/LR and all GPRs from
 * the trapframe, then disables translation/machine-check/recoverability
 * and reloads SRR0/SRR1 from the savearea.  If returning to user mode
 * (SRR1[PSL_PR] set), the user SLB entries are reinstalled via
 * restore_usersrs (skipped when PC_FLAGS bit 0 is set).  Uses SPRG2 to
 * hold CR and SPRG3 to hold r3 across the restore; after the macro only
 * the rfid remains.
 */
#define	FRAME_LEAVE(savearea)						\
/* Disable exceptions: */						\
	mfmsr	%r2;							\
	andi.	%r2,%r2,~PSL_EE@l;					\
	mtmsr	%r2;							\
	isync;								\
/* Now restore regs: */							\
	ld	%r2,FRAME_SRR0+48(%r1);					\
	ld	%r3,FRAME_SRR1+48(%r1);					\
	ld	%r4,FRAME_CTR+48(%r1);					\
	ld	%r5,FRAME_XER+48(%r1);					\
	ld	%r6,FRAME_LR+48(%r1);					\
	GET_CPUINFO(%r7);						\
	std	%r2,(savearea+CPUSAVE_SRR0)(%r7); /* save SRR0 */	\
	std	%r3,(savearea+CPUSAVE_SRR1)(%r7); /* save SRR1 */	\
	ld	%r7,FRAME_CR+48(%r1);					\
	mtctr	%r4;							\
	mtxer	%r5;							\
	mtlr	%r6;							\
	mtsprg2	%r7;			/* save cr */			\
	ld	%r31,FRAME_31+48(%r1);   /* restore r0-31 */		\
	ld	%r30,FRAME_30+48(%r1);					\
	ld	%r29,FRAME_29+48(%r1);					\
	ld	%r28,FRAME_28+48(%r1);					\
	ld	%r27,FRAME_27+48(%r1);					\
	ld	%r26,FRAME_26+48(%r1);					\
	ld	%r25,FRAME_25+48(%r1);					\
	ld	%r24,FRAME_24+48(%r1);					\
	ld	%r23,FRAME_23+48(%r1);					\
	ld	%r22,FRAME_22+48(%r1);					\
	ld	%r21,FRAME_21+48(%r1);					\
	ld	%r20,FRAME_20+48(%r1);					\
	ld	%r19,FRAME_19+48(%r1);					\
	ld	%r18,FRAME_18+48(%r1);					\
	ld	%r17,FRAME_17+48(%r1);					\
	ld	%r16,FRAME_16+48(%r1);					\
	ld	%r15,FRAME_15+48(%r1);					\
	ld	%r14,FRAME_14+48(%r1);					\
	ld	%r13,FRAME_13+48(%r1);					\
	ld	%r12,FRAME_12+48(%r1);					\
	ld	%r11,FRAME_11+48(%r1);					\
	ld	%r10,FRAME_10+48(%r1);					\
	ld	%r9, FRAME_9+48(%r1);					\
	ld	%r8, FRAME_8+48(%r1);					\
	ld	%r7, FRAME_7+48(%r1);					\
	ld	%r6, FRAME_6+48(%r1);					\
	ld	%r5, FRAME_5+48(%r1);					\
	ld	%r4, FRAME_4+48(%r1);					\
	ld	%r3, FRAME_3+48(%r1);					\
	ld	%r2, FRAME_2+48(%r1);					\
	ld	%r0, FRAME_0+48(%r1);					\
	ld	%r1, FRAME_1+48(%r1);					\
/* Can't touch %r1 from here on */					\
	mtsprg3	%r3;			/* save r3 */			\
/* Disable translation, machine check and recoverability: */		\
	mfmsr	%r3;							\
	andi.	%r3,%r3,~(PSL_DR|PSL_IR|PSL_ME|PSL_RI)@l;		\
	mtmsr	%r3;							\
	isync;								\
/* Decide whether we return to user mode: */				\
	GET_CPUINFO(%r3);						\
	ld	%r3,(savearea+CPUSAVE_SRR1)(%r3);			\
	mtcr	%r3;							\
	bf	17,1f;			/* branch if PSL_PR is false */	\
/* Restore user SRs */							\
	GET_CPUINFO(%r3);						\
	std	%r27,(savearea+CPUSAVE_R27)(%r3);			\
	lwz	%r27,PC_FLAGS(%r3);					\
	mtcr	%r27;							\
	bt	0, 0f;			/* Check to skip restoring SRs. */ \
	std	%r28,(savearea+CPUSAVE_R28)(%r3);			\
	std	%r29,(savearea+CPUSAVE_R29)(%r3);			\
	std	%r30,(savearea+CPUSAVE_R30)(%r3);			\
	std	%r31,(savearea+CPUSAVE_R31)(%r3);			\
	mflr	%r27;			/* preserve LR */		\
	bl	restore_usersrs;	/* uses r28-r31 */		\
	mtlr	%r27;							\
	ld	%r31,(savearea+CPUSAVE_R31)(%r3);			\
	ld	%r30,(savearea+CPUSAVE_R30)(%r3);			\
	ld	%r29,(savearea+CPUSAVE_R29)(%r3);			\
	ld	%r28,(savearea+CPUSAVE_R28)(%r3);			\
0:									\
	ld	%r27,(savearea+CPUSAVE_R27)(%r3);			\
1:	mfsprg2	%r3;			/* restore cr */		\
	mtcr	%r3;							\
	GET_CPUINFO(%r3);						\
	ld	%r3,(savearea+CPUSAVE_SRR0)(%r3); /* restore srr0 */	\
	mtsrr0	%r3;							\
	GET_CPUINFO(%r3);						\
	ld	%r3,(savearea+CPUSAVE_SRR1)(%r3); /* restore srr1 */	\
	mtsrr1	%r3;							\
	mfsprg3	%r3			/* restore r3 */
297
#ifdef KDTRACE_HOOKS
	.data
	.globl	dtrace_invop_calltrap_addr
	.align	8
	.type	dtrace_invop_calltrap_addr, @object
	.size	dtrace_invop_calltrap_addr, 8
	/*
	 * 8-byte slot (two 32-bit words) holding a trap entry address;
	 * presumably filled in at runtime by the DTrace invop machinery —
	 * confirm against the KDTRACE_HOOKS consumers of this symbol.
	 */
dtrace_invop_calltrap_addr:
	.word	0
	.word	0

	.text
#endif
310
/*
 * Processor reset exception handler.  These are typically
 * the first instructions the processor executes after a
 * software reset.  We do this in two bits so that we are
 * not still hanging around in the trap handling region
 * once the MMU is turned on.
 *
 * Distinguishes a software reset from a power-saving wakeup using
 * SRR1 bits 46:47, then branches (via LR, position-independently) to
 * either cpu_reset_handler or cpu_wakeup_handler outside the vector
 * area.  The /* 0xNN */ comments mark byte offsets within the vector.
 */
	.globl	CNAME(rstcode), CNAME(rstcodeend), CNAME(cpu_reset_handler)
	.globl	CNAME(cpu_wakeup_handler)
	.p2align 3
CNAME(rstcode):
#ifdef __LITTLE_ENDIAN__
	/*
	 * XXX This shouldn't be necessary.
	 *
	 * According to the ISA documentation, LE should be set from HILE
	 * or the LPCR ILE bit automatically.  However, the entry into this
	 * vector from OPAL_START_CPU does not honor this correctly.
	 *
	 * We should be able to define an alternate entry for opal's
	 * start_kernel_secondary asm code to branch to.
	 */
	RETURN_TO_NATIVE_ENDIAN
#endif
	/*
	 * Check if this is software reset or
	 * processor is waking up from power saving mode
	 * It is software reset when 46:47 = 0b00
	 */
	/* 0x00 */
	ld	%r2,TRAP_GENTRAP(0)	/* Real-mode &generictrap */
	mfsrr1	%r9			/* Load SRR1 into r9 */
	andis.	%r9,%r9,0x3		/* Logic AND with 46:47 bits */

	beq	2f			/* Branch if software reset */
	/* 0x10 */
	/* Reset was wakeup */
	addi	%r9,%r2,(cpu_wakeup_handler-generictrap)
	b	1f			/* Was power save, do the wakeup */

	/* Reset was software reset */
	/* Explicitly set MSR[SF] (64-bit mode) */
2:	mfmsr	%r9
	li	%r8,1
	/* 0x20 */
	insrdi	%r9,%r8,1,0
	mtmsrd	%r9
	isync

	addi	%r9,%r2,(cpu_reset_handler-generictrap)

	/* 0x30 */
1:	mtlr	%r9
	blr	/* Branch to either cpu_reset_handler
		 * or cpu_wakeup_handler.
		 */
CNAME(rstcodeend):
368
/*
 * Second-stage reset handler, reached from rstcode on a software reset.
 * Switches to the temporary stack, bootstraps PCPU and the MMU through
 * the cpudep/pmap C helpers, and either longjmp()s to a saved context
 * (PC_RESTORE non-NULL) or, under SMP, enters machdep_ap_bootstrap.
 * Never returns.
 */
cpu_reset_handler:
	GET_TOCBASE(%r2)

	addis	%r1,%r2,TOC_REF(tmpstk)@ha
	ld	%r1,TOC_REF(tmpstk)@l(%r1)	/* get new SP */
	addi	%r1,%r1,(TMPSTKSZ-48)

	bl	CNAME(cpudep_ap_early_bootstrap) /* Set PCPU */
	nop
	lis	%r3,1@l
	bl	CNAME(pmap_cpu_bootstrap)	/* Turn on virtual memory */
	nop
	bl	CNAME(cpudep_ap_bootstrap)	/* Set up PCPU and stack */
	nop
	mr	%r1,%r3			/* Use new stack */
	bl	CNAME(cpudep_ap_setup)
	nop
	GET_CPUINFO(%r5)
	ld	%r3,(PC_RESTORE)(%r5)
	cmpldi	%cr0,%r3,0		/* Saved context to resume? */
	beq	%cr0,2f
	nop
	li	%r4,1
	bl	CNAME(longjmp)		/* Resume it; does not return */
	nop
2:
#ifdef SMP
	bl	CNAME(machdep_ap_bootstrap)	/* And away! */
	nop
#endif

	/* Should not be reached */
9:
	b	9b
403
/*
 * Wakeup path, reached from rstcode when SRR1 indicates a return from a
 * power-saving state.  Verifies the wakeup is genuine (can_wakeup flag),
 * turns the MMU on, then restores the non-volatile register context of
 * the current thread's PCB and resumes it with rfid at its saved LR.
 */
cpu_wakeup_handler:
	GET_TOCBASE(%r2)

	/* Check for false wakeup due to a badly set SRR1 (e.g. by OPAL) */
	addis	%r3,%r2,TOC_REF(can_wakeup)@ha
	ld	%r3,TOC_REF(can_wakeup)@l(%r3)
	ld	%r3,0(%r3)
	cmpdi	%r3,0
	beq	cpu_reset_handler	/* Not expected: treat as reset */

	/* Turn on MMU after return from interrupt (set in SRR1 for rfid) */
	mfsrr1	%r3
	ori	%r3,%r3,(PSL_IR | PSL_DR)
	mtsrr1	%r3

	/* Turn on MMU now too (needed to access PCB) */
	mfmsr	%r3
	ori	%r3,%r3,(PSL_IR | PSL_DR)
	mtmsr	%r3
	isync

	mfsprg0	%r3			/* PCPU pointer */

	ld	%r3,PC_CURTHREAD(%r3)	/* Get current thread */
	ld	%r3,TD_PCB(%r3)		/* Get PCB of current thread */
	ld	%r12,PCB_CONTEXT(%r3)	/* Load the non-volatile GP regs. */
	ld	%r13,PCB_CONTEXT+1*8(%r3)
	ld	%r14,PCB_CONTEXT+2*8(%r3)
	ld	%r15,PCB_CONTEXT+3*8(%r3)
	ld	%r16,PCB_CONTEXT+4*8(%r3)
	ld	%r17,PCB_CONTEXT+5*8(%r3)
	ld	%r18,PCB_CONTEXT+6*8(%r3)
	ld	%r19,PCB_CONTEXT+7*8(%r3)
	ld	%r20,PCB_CONTEXT+8*8(%r3)
	ld	%r21,PCB_CONTEXT+9*8(%r3)
	ld	%r22,PCB_CONTEXT+10*8(%r3)
	ld	%r23,PCB_CONTEXT+11*8(%r3)
	ld	%r24,PCB_CONTEXT+12*8(%r3)
	ld	%r25,PCB_CONTEXT+13*8(%r3)
	ld	%r26,PCB_CONTEXT+14*8(%r3)
	ld	%r27,PCB_CONTEXT+15*8(%r3)
	ld	%r28,PCB_CONTEXT+16*8(%r3)
	ld	%r29,PCB_CONTEXT+17*8(%r3)
	ld	%r30,PCB_CONTEXT+18*8(%r3)
	ld	%r31,PCB_CONTEXT+19*8(%r3)
	ld	%r5,PCB_CR(%r3)		/* Load the condition register */
	mtcr	%r5
	ld	%r5,PCB_LR(%r3)		/* Load the link register */
	mtsrr0	%r5			/* rfid will resume here */
	ld	%r1,PCB_SP(%r3)		/* Load the stack pointer */
	ld	%r2,PCB_TOC(%r3)	/* Load the TOC pointer */

	rfid
457
/*
 * This code gets copied to all the trap vectors
 * (except ISI/DSI, ALI, and the interrupts).  Has to fit in 8 instructions!
 *
 * Saves SP in SPRG1 and the old LR in SPRG2, then branches-and-links to
 * the real-mode generictrap entry (address at TRAP_ENTRY).  The blrl
 * leaves the vector address in LR; generictrap recovers the vector
 * number as LR & (0xff00 | r1), with r1 = 0xe0 here.
 */
	.globl	CNAME(trapcode),CNAME(trapcodeend)
	.p2align 3
CNAME(trapcode):
	mtsprg1	%r1			/* save SP */
	mflr	%r1			/* Save the old LR in r1 */
	mtsprg2	%r1			/* And then in SPRG2 */
	ld	%r1,TRAP_ENTRY(0)
	mtlr	%r1
	li	%r1, 0xe0		/* How to get the vector from LR */
	blrl				/* Branch to generictrap */
CNAME(trapcodeend):
474
/*
 * Same thing for traps setting HSRR0/HSRR1: identical to trapcode but
 * targets generichypertrap, which first copies HSRR0/HSRR1 into
 * SRR0/SRR1 before falling into generictrap.
 */
	.globl	CNAME(hypertrapcode),CNAME(hypertrapcodeend)
	.p2align 3
CNAME(hypertrapcode):
	mtsprg1	%r1			/* save SP */
	mflr	%r1			/* Save the old LR in r1 */
	mtsprg2	%r1			/* And then in SPRG2 */
	ld	%r1,TRAP_GENTRAP(0)
	addi	%r1,%r1,(generichypertrap-generictrap)
	mtlr	%r1
	li	%r1, 0xe0		/* How to get the vector from LR */
	blrl				/* Branch to generichypertrap */
CNAME(hypertrapcodeend):
488
/*
 * For SLB misses: do special things for the kernel
 *
 * User-mode misses are re-routed to generictrap as ordinary traps;
 * kernel-mode misses go to kern_slbtrap, which can service the miss
 * without building a full trapframe.  Scratch state is kept in the
 * per-CPU PC_SLBSAVE area (offsets are byte offsets into that area).
 *
 * Note: SPRG1 is always safe to overwrite any time the MMU was on, which is
 * the only time this can be called.
 */
	.globl	CNAME(slbtrap),CNAME(slbtrapend)
	.p2align 3
CNAME(slbtrap):
	/* 0x00 */
	mtsprg1	%r1			/* save SP */
	GET_CPUINFO(%r1)
	std	%r2,(PC_SLBSAVE+16)(%r1) /* save r2 */
	mfcr	%r2
	/* 0x10 */
	std	%r2,(PC_SLBSAVE+104)(%r1) /* save CR */
	mfsrr1	%r2			/* test kernel mode */
	mtcr	%r2
	bf	17,2f			/* branch if PSL_PR is false */
	/* 0x20 */
	/* User mode */
	ld	%r2,(PC_SLBSAVE+104)(%r1)
	mtcr	%r2			/* restore CR */
	ld	%r2,(PC_SLBSAVE+16)(%r1) /* restore r2 */
	mflr	%r1
	/* 0x30 */
	mtsprg2	%r1			/* save LR in SPRG2 */
	ld	%r1,TRAP_ENTRY(0)	/* real-mode &generictrap */
	mtlr	%r1
	li	%r1, 0x80		/* How to get the vector from LR */
	/* 0x40 */
	blrl				/* Branch to generictrap */
2:	mflr	%r2			/* Save the old LR in r2 */
	/* Kernel mode */
	ld	%r1,TRAP_GENTRAP(0)	/* Real-mode &generictrap */
	addi	%r1,%r1,(kern_slbtrap-generictrap)
	/* 0x50 */
	mtlr	%r1
	GET_CPUINFO(%r1)
	blrl				/* Branch to kern_slbtrap */
/* must fit in 128 bytes! */
CNAME(slbtrapend):
531
/*
 * Kernel-mode SLB miss handler.
 *
 * On entry:
 *	SPRG1: SP
 *	r1: pcpu
 *	r2: LR
 *	LR: branch address in trap region
 *
 * A miss on a user segment is bounced back to generictrap as a normal
 * trap.  A genuine kernel miss saves all volatile state into PC_SLBSAVE,
 * switches to the small per-CPU SLB stack, calls
 * handle_kernel_slb_spill(vector, DAR, SRR0), reloads the kernel SLB
 * via restore_kernsrs, restores everything, and rfid's back.
 */
kern_slbtrap:
	std	%r2,(PC_SLBSAVE+136)(%r1) /* old LR */
	std	%r3,(PC_SLBSAVE+24)(%r1) /* save R3 */

	/* Check if this needs to be handled as a regular trap (userseg miss) */
	mflr	%r2
	andi.	%r2,%r2,0xff80		/* extract trap vector from LR */
	cmpwi	%r2,EXC_DSE
	bne	1f
	mfdar	%r2			/* data miss: fault addr is DAR */
	b	2f
1:	mfsrr0	%r2			/* instr miss: fault addr is SRR0 */
2:	/* r2 now contains the fault address */
	lis	%r3,SEGMENT_MASK@highesta
	ori	%r3,%r3,SEGMENT_MASK@highera
	sldi	%r3,%r3,32
	oris	%r3,%r3,SEGMENT_MASK@ha
	ori	%r3,%r3,SEGMENT_MASK@l
	and	%r2,%r2,%r3		/* R2 = segment base address */
	lis	%r3,USER_ADDR@highesta
	ori	%r3,%r3,USER_ADDR@highera
	sldi	%r3,%r3,32
	oris	%r3,%r3,USER_ADDR@ha
	ori	%r3,%r3,USER_ADDR@l
	cmpd	%r2,%r3			/* Compare fault base to USER_ADDR */
	bne	3f

	/* User seg miss, handle as a regular trap */
	ld	%r2,(PC_SLBSAVE+104)(%r1) /* Restore CR */
	mtcr	%r2
	ld	%r2,(PC_SLBSAVE+16)(%r1) /* Restore R2,R3 */
	ld	%r3,(PC_SLBSAVE+24)(%r1)
	ld	%r1,(PC_SLBSAVE+136)(%r1) /* Load the old LR into r1 */
	mtsprg2	%r1			/* And then in SPRG2 */
	li	%r1, 0x80		/* How to get the vector from LR */
	b	generictrap		/* Retain old LR using b */

3:	/* Real kernel SLB miss */
	std	%r0,(PC_SLBSAVE+0)(%r1)	/* free all volatile regs */
	mfsprg1	%r2			/* Old R1 */
	std	%r2,(PC_SLBSAVE+8)(%r1)
	/* R2,R3 already saved */
	std	%r4,(PC_SLBSAVE+32)(%r1)
	std	%r5,(PC_SLBSAVE+40)(%r1)
	std	%r6,(PC_SLBSAVE+48)(%r1)
	std	%r7,(PC_SLBSAVE+56)(%r1)
	std	%r8,(PC_SLBSAVE+64)(%r1)
	std	%r9,(PC_SLBSAVE+72)(%r1)
	std	%r10,(PC_SLBSAVE+80)(%r1)
	std	%r11,(PC_SLBSAVE+88)(%r1)
	std	%r12,(PC_SLBSAVE+96)(%r1)
	/* CR already saved */
	mfxer	%r2			/* save XER */
	std	%r2,(PC_SLBSAVE+112)(%r1)
	mflr	%r2			/* save LR (SP already saved) */
	std	%r2,(PC_SLBSAVE+120)(%r1)
	mfctr	%r2			/* save CTR */
	std	%r2,(PC_SLBSAVE+128)(%r1)

	/* Call handler */
	addi	%r1,%r1,PC_SLBSTACK-48+1024 /* switch to per-CPU SLB stack */
	li	%r2,~15
	and	%r1,%r1,%r2		/* 16-byte align the stack */
	GET_TOCBASE(%r2)
	mflr	%r3
	andi.	%r3,%r3,0xff80		/* arg0 = trap vector */
	mfdar	%r4			/* arg1 = DAR */
	mfsrr0	%r5			/* arg2 = faulting PC */
	bl	handle_kernel_slb_spill
	nop

	/* Save r28-31, restore r4-r12 */
	GET_CPUINFO(%r1)
	ld	%r4,(PC_SLBSAVE+32)(%r1)
	ld	%r5,(PC_SLBSAVE+40)(%r1)
	ld	%r6,(PC_SLBSAVE+48)(%r1)
	ld	%r7,(PC_SLBSAVE+56)(%r1)
	ld	%r8,(PC_SLBSAVE+64)(%r1)
	ld	%r9,(PC_SLBSAVE+72)(%r1)
	ld	%r10,(PC_SLBSAVE+80)(%r1)
	ld	%r11,(PC_SLBSAVE+88)(%r1)
	ld	%r12,(PC_SLBSAVE+96)(%r1)
	/* r8-r12 slots now free: reuse them to shelter r28-r31 */
	std	%r28,(PC_SLBSAVE+64)(%r1)
	std	%r29,(PC_SLBSAVE+72)(%r1)
	std	%r30,(PC_SLBSAVE+80)(%r1)
	std	%r31,(PC_SLBSAVE+88)(%r1)

	/* Restore kernel mapping (clobbers r28-r31, CR) */
	bl	restore_kernsrs

	/* Restore remaining registers */
	ld	%r28,(PC_SLBSAVE+64)(%r1)
	ld	%r29,(PC_SLBSAVE+72)(%r1)
	ld	%r30,(PC_SLBSAVE+80)(%r1)
	ld	%r31,(PC_SLBSAVE+88)(%r1)

	ld	%r2,(PC_SLBSAVE+104)(%r1)
	mtcr	%r2
	ld	%r2,(PC_SLBSAVE+112)(%r1)
	mtxer	%r2
	ld	%r2,(PC_SLBSAVE+120)(%r1)
	mtlr	%r2
	ld	%r2,(PC_SLBSAVE+128)(%r1)
	mtctr	%r2
	ld	%r2,(PC_SLBSAVE+136)(%r1)
	mtlr	%r2			/* final mtlr wins: pre-trap LR */

	/* Restore r0-r3 */
	ld	%r0,(PC_SLBSAVE+0)(%r1)
	ld	%r2,(PC_SLBSAVE+16)(%r1)
	ld	%r3,(PC_SLBSAVE+24)(%r1)
	mfsprg1	%r1			/* restore original SP */

	/* Back to whatever we were doing */
	rfid
654
/*
 * For ALI (alignment interrupt): has to save DSISR and DAR.
 *
 * Saves r27-r31 plus DAR/DSISR into PC_TEMPSAVE, stashes the vector
 * (EXC_ALI) in SPRG3, primes CR with SRR1 for the PSL_PR test, and
 * branches into s_trap (past generictrap's own vector computation,
 * which would not work here).
 */
	.globl	CNAME(alitrap),CNAME(aliend)
CNAME(alitrap):
	mtsprg1	%r1			/* save SP */
	GET_CPUINFO(%r1)
	std	%r27,(PC_TEMPSAVE+CPUSAVE_R27)(%r1)	/* free r27-r31 */
	std	%r28,(PC_TEMPSAVE+CPUSAVE_R28)(%r1)
	std	%r29,(PC_TEMPSAVE+CPUSAVE_R29)(%r1)
	std	%r30,(PC_TEMPSAVE+CPUSAVE_R30)(%r1)
	std	%r31,(PC_TEMPSAVE+CPUSAVE_R31)(%r1)
	mfdar	%r30
	mfdsisr	%r31
	std	%r30,(PC_TEMPSAVE+CPUSAVE_AIM_DAR)(%r1)
	std	%r31,(PC_TEMPSAVE+CPUSAVE_AIM_DSISR)(%r1)
	mfsprg1	%r1			/* restore SP, in case of branch */
	mflr	%r28			/* save LR */
	mfcr	%r29			/* save CR */

	ld	%r31,TRAP_GENTRAP(0)
	addi	%r31,%r31,(s_trap - generictrap)
	mtlr	%r31

	/* Put our exception vector in SPRG3 */
	li	%r31, EXC_ALI
	mtsprg3	%r31

	/* Test whether we already had PR set */
	mfsrr1	%r31
	mtcr	%r31
	blrl				/* Branch to s_trap */
CNAME(aliend):
688
/*
 * Similar to the above for DSI.
 * Has to handle standard pagetable spills.
 *
 * Saves r27-r31 into PC_DISISAVE (a dedicated area, since a DSI can
 * occur while PC_TEMPSAVE is live), saves CR in r29 and XER in SPRG2,
 * then branches to disitrap in the real handler region.
 */
	.globl	CNAME(dsitrap),CNAME(dsiend)
	.p2align 3
CNAME(dsitrap):
	mtsprg1	%r1			/* save SP */
	GET_CPUINFO(%r1)
	std	%r27,(PC_DISISAVE+CPUSAVE_R27)(%r1)	/* free r27-r31 */
	std	%r28,(PC_DISISAVE+CPUSAVE_R28)(%r1)
	std	%r29,(PC_DISISAVE+CPUSAVE_R29)(%r1)
	std	%r30,(PC_DISISAVE+CPUSAVE_R30)(%r1)
	std	%r31,(PC_DISISAVE+CPUSAVE_R31)(%r1)
	mfcr	%r29			/* save CR */
	mfxer	%r30			/* save XER */
	mtsprg2	%r30			/* in SPRG2 */
	mfsrr1	%r31			/* test kernel mode */
	mtcr	%r31
	mflr	%r28			/* save LR (SP already saved) */
	ld	%r1,TRAP_GENTRAP(0)
	addi	%r1,%r1,(disitrap-generictrap)
	mtlr	%r1
	blrl				/* Branch to disitrap */
CNAME(dsiend):
714
/*
 * Preamble code for DSI/ISI traps.
 *
 * Computes the trap vector into SPRG3, copies the registers saved in
 * PC_DISISAVE over to PC_TEMPSAVE (plus DAR/DSISR), and, under KDB,
 * diverts a fault within one page of the kernel SP to the debugger as a
 * probable kernel stack overflow; otherwise continues at realtrap.
 */
disitrap:
	/* Write the trap vector to SPRG3 by computing LR & 0xff00 */
	mflr	%r1
	andi.	%r1,%r1,0xff00
	mtsprg3	%r1

	/* Copy the DISI savearea into TEMPSAVE for the common trap path */
	GET_CPUINFO(%r1)
	ld	%r31,(PC_DISISAVE+CPUSAVE_R27)(%r1)
	std	%r31,(PC_TEMPSAVE+CPUSAVE_R27)(%r1)
	ld	%r30,(PC_DISISAVE+CPUSAVE_R28)(%r1)
	std	%r30,(PC_TEMPSAVE+CPUSAVE_R28)(%r1)
	ld	%r31,(PC_DISISAVE+CPUSAVE_R29)(%r1)
	std	%r31,(PC_TEMPSAVE+CPUSAVE_R29)(%r1)
	ld	%r30,(PC_DISISAVE+CPUSAVE_R30)(%r1)
	std	%r30,(PC_TEMPSAVE+CPUSAVE_R30)(%r1)
	ld	%r31,(PC_DISISAVE+CPUSAVE_R31)(%r1)
	std	%r31,(PC_TEMPSAVE+CPUSAVE_R31)(%r1)
	mfdar	%r30
	mfdsisr	%r31
	std	%r30,(PC_TEMPSAVE+CPUSAVE_AIM_DAR)(%r1)
	std	%r31,(PC_TEMPSAVE+CPUSAVE_AIM_DSISR)(%r1)

#ifdef KDB
	/* Try to detect a kernel stack overflow */
	mfsrr1	%r31
	mtcr	%r31
	bt	17,realtrap		/* branch if user mode */
	mfsprg1	%r31			/* get old SP */
	clrrdi	%r31,%r31,12		/* Round SP down to nearest page */
	sub.	%r30,%r31,%r30		/* SP - DAR */
	bge	1f
	neg	%r30,%r30		/* modulo value */
1:	cmpldi	%cr0,%r30,4096		/* is DAR within a page of SP? */
	bge	%cr0,realtrap		/* no, too far away. */

	/* Now convert this DSI into a DDB trap. */
	GET_CPUINFO(%r1)
	ld	%r30,(PC_TEMPSAVE+CPUSAVE_AIM_DAR)(%r1)  /* get DAR */
	std	%r30,(PC_DBSAVE  +CPUSAVE_AIM_DAR)(%r1)  /* save DAR */
	ld	%r30,(PC_TEMPSAVE+CPUSAVE_AIM_DSISR)(%r1) /* get DSISR */
	std	%r30,(PC_DBSAVE  +CPUSAVE_AIM_DSISR)(%r1) /* save DSISR */
	ld	%r31,(PC_DISISAVE+CPUSAVE_R27)(%r1)	/* get r27 */
	std	%r31,(PC_DBSAVE  +CPUSAVE_R27)(%r1)	/* save r27 */
	ld	%r30,(PC_DISISAVE+CPUSAVE_R28)(%r1)	/* get r28 */
	std	%r30,(PC_DBSAVE  +CPUSAVE_R28)(%r1)	/* save r28 */
	ld	%r31,(PC_DISISAVE+CPUSAVE_R29)(%r1)	/* get r29 */
	std	%r31,(PC_DBSAVE  +CPUSAVE_R29)(%r1)	/* save r29 */
	ld	%r30,(PC_DISISAVE+CPUSAVE_R30)(%r1)	/* get r30 */
	std	%r30,(PC_DBSAVE  +CPUSAVE_R30)(%r1)	/* save r30 */
	ld	%r31,(PC_DISISAVE+CPUSAVE_R31)(%r1)	/* get r31 */
	std	%r31,(PC_DBSAVE  +CPUSAVE_R31)(%r1)	/* save r31 */
	b	dbtrap
#endif
771
	/* XXX need stack probe here */
/*
 * Common continuation for DSI/ISI (and KDB fallthrough): decide user vs.
 * kernel mode from SRR1 and, for user traps, reinstall the kernel SLB
 * and switch to the thread's kernel stack (PC_CURPCB) before joining the
 * common path at s_trap/k_trap.
 */
realtrap:
/* Test whether we already had PR set */
	mfsrr1	%r1
	mtcr	%r1
	mfsprg1	%r1			/* restore SP (might have been
					   overwritten) */
	bf	17,k_trap		/* branch if PSL_PR is false */
	GET_CPUINFO(%r1)
	mr	%r27,%r28		/* Save LR, r29 (SPRG2 holds r29
					 * across restore_kernsrs) */
	mtsprg2	%r29
	bl	restore_kernsrs		/* enable kernel mapping */
	mfsprg2	%r29
	mr	%r28,%r27
	ld	%r1,PC_CURPCB(%r1)
	b	s_trap
788
/*
 * generictrap does some standard setup for trap handling to minimize
 * the code that need be installed in the actual vectors.  It expects
 * the following conditions.
 *
 * R1 - Trap vector = LR & (0xff00 | R1)
 * SPRG1 - Original R1 contents
 * SPRG2 - Original LR
 *
 * generichypertrap first copies HSRR0/HSRR1 (hypervisor trap state)
 * into SRR0/SRR1, then falls through into generictrap, which saves
 * r27-r31 and DAR/DSISR into PC_TEMPSAVE, computes the exception vector
 * into SPRG3, and joins the common path at s_trap/k_trap below.
 */

generichypertrap:
	mtsprg3	%r1			/* preserve r1 over HSRR copy */
	mfspr	%r1, SPR_HSRR0
	mtsrr0	%r1
	mfspr	%r1, SPR_HSRR1
	mtsrr1	%r1
	mfsprg3	%r1
	.globl	CNAME(generictrap)
generictrap:
	/* Save R1 for computing the exception vector */
	mtsprg3	%r1

	/* Save interesting registers */
	GET_CPUINFO(%r1)
	std	%r27,(PC_TEMPSAVE+CPUSAVE_R27)(%r1)	/* free r27-r31 */
	std	%r28,(PC_TEMPSAVE+CPUSAVE_R28)(%r1)
	std	%r29,(PC_TEMPSAVE+CPUSAVE_R29)(%r1)
	std	%r30,(PC_TEMPSAVE+CPUSAVE_R30)(%r1)
	std	%r31,(PC_TEMPSAVE+CPUSAVE_R31)(%r1)
	mfdar	%r30
	std	%r30,(PC_TEMPSAVE+CPUSAVE_AIM_DAR)(%r1)
	mfdsisr	%r30
	std	%r30,(PC_TEMPSAVE+CPUSAVE_AIM_DSISR)(%r1)
	mfsprg1	%r1			/* restore SP, in case of branch */
	mfsprg2	%r28			/* save LR */
	mfcr	%r29			/* save CR */

	/* Compute the exception vector from the link register */
	mfsprg3	%r31
	ori	%r31,%r31,0xff00	/* mask = 0xff00 | saved r1 */
	mflr	%r30
	addi	%r30,%r30,-4		/* The branch instruction, not the next */
	and	%r30,%r30,%r31
	mtsprg3	%r30			/* SPRG3 = exception vector */

	/* Test whether we already had PR set */
	mfsrr1	%r31
	mtcr	%r31

s_trap:
	bf	17,k_trap		/* branch if PSL_PR is false */
	GET_CPUINFO(%r1)
u_trap:
	mr	%r27,%r28		/* Save LR, r29 (SPRG2 holds r29
					 * across restore_kernsrs) */
	mtsprg2	%r29
	bl	restore_kernsrs		/* enable kernel mapping */
	mfsprg2	%r29
	mr	%r28,%r27
	ld	%r1,PC_CURPCB(%r1)	/* switch to the kernel stack */
848
/*
 * Now the common trap catching code.
 *
 * Builds the trapframe, dispatches to powerpc_interrupt() in C, then on
 * the way out (with interrupts disabled) delivers any pending AST when
 * returning to user mode, unwinds the frame, and rfid's back.
 */
k_trap:
	FRAME_SETUP(PC_TEMPSAVE)
/* Call C interrupt dispatcher: */
trapagain:
	GET_TOCBASE(%r2)
	addi	%r3,%r1,48		/* arg = &trapframe */
	bl	CNAME(powerpc_interrupt)
	nop

	.globl	CNAME(trapexit)	/* backtrace code sentinel */
CNAME(trapexit):
/* Disable interrupts: */
	mfmsr	%r3
	andi.	%r3,%r3,~PSL_EE@l
	mtmsr	%r3
	isync
/* Test AST pending: */
	ld	%r5,FRAME_SRR1+48(%r1)	/* original MSR of the trapped ctx */
	mtcr	%r5
	bf	17,1f			/* branch if PSL_PR is false */

	GET_CPUINFO(%r3)		/* get per-CPU pointer */
	lwz	%r4,TD_AST(%r13)	/* get thread ast value */
	cmpwi	%r4,0
	beq	1f
	mfmsr	%r3			/* re-enable interrupts */
	ori	%r3,%r3,PSL_EE@l
	mtmsr	%r3
	isync
	GET_TOCBASE(%r2)
	addi	%r3,%r1,48
	bl	CNAME(ast)
	nop
	.globl	CNAME(asttrapexit)	/* backtrace code sentinel #2 */
CNAME(asttrapexit):
	b	trapexit		/* test ast ret value ? */
1:
	FRAME_LEAVE(PC_TEMPSAVE)
	rfid
891
#if defined(KDB)
/*
 * Deliberate entry to dbtrap: called from C (e.g. kdb_enter) to drop
 * into the debugger.  Fakes up the trap-entry conditions (SRR0/SRR1,
 * PC_DBSAVE scratch regs, EXC_BPT vector in LR) and falls into dbtrap.
 */
ASENTRY_NOPROF(breakpoint)
	mtsprg1	%r1
	mfmsr	%r3
	mtsrr1	%r3
	andi.	%r3,%r3,~(PSL_EE|PSL_ME)@l
	mtmsr	%r3			/* disable interrupts */
	isync
	GET_CPUINFO(%r3)
	std	%r27,(PC_DBSAVE+CPUSAVE_R27)(%r3)
	std	%r28,(PC_DBSAVE+CPUSAVE_R28)(%r3)
	std	%r29,(PC_DBSAVE+CPUSAVE_R29)(%r3)
	std	%r30,(PC_DBSAVE+CPUSAVE_R30)(%r3)
	std	%r31,(PC_DBSAVE+CPUSAVE_R31)(%r3)
	mflr	%r28
	li	%r29,EXC_BPT
	mtlr	%r29			/* vector for dbtrap's LR & 0xff00 */
	mfcr	%r29
	mtsrr0	%r28			/* "trap" PC = our return address */

/*
 * Now the kdb trap catching code: runs on the dedicated trap stack,
 * builds a frame from PC_DBSAVE, and calls db_trap_glue().  If KDB does
 * not claim the trap, state is repacked into PC_TEMPSAVE and control
 * rejoins the normal path at realtrap.
 */
dbtrap:
	/* Write the trap vector to SPRG3 by computing LR & 0xff00 */
	mflr	%r1
	andi.	%r1,%r1,0xff00
	mtsprg3	%r1

	GET_TOCBASE(%r1)		/* get new SP */
	addis	%r1,%r1,TOC_REF(trapstk)@ha
	ld	%r1,TOC_REF(trapstk)@l(%r1)
	addi	%r1,%r1,(TRAPSTKSZ-48)

	FRAME_SETUP(PC_DBSAVE)
/* Call C trap code: */
	GET_TOCBASE(%r2)
	addi	%r3,%r1,48
	bl	CNAME(db_trap_glue)
	nop
	or.	%r3,%r3,%r3		/* nonzero: KDB handled the trap */
	bne	dbleave
/* This wasn't for KDB, so switch to real trap: */
	ld	%r3,FRAME_EXC+48(%r1)	/* save exception */
	GET_CPUINFO(%r4)
	std	%r3,(PC_DBSAVE+CPUSAVE_R31)(%r4)
	FRAME_LEAVE(PC_DBSAVE)
	mtsprg1	%r1			/* prepare for entrance to realtrap */
	GET_CPUINFO(%r1)
	std	%r27,(PC_TEMPSAVE+CPUSAVE_R27)(%r1)
	std	%r28,(PC_TEMPSAVE+CPUSAVE_R28)(%r1)
	std	%r29,(PC_TEMPSAVE+CPUSAVE_R29)(%r1)
	std	%r30,(PC_TEMPSAVE+CPUSAVE_R30)(%r1)
	std	%r31,(PC_TEMPSAVE+CPUSAVE_R31)(%r1)
	mflr	%r28
	mfcr	%r29
	ld	%r31,(PC_DBSAVE+CPUSAVE_R31)(%r1)
	mtsprg3	%r31			/* SPRG3 was clobbered by FRAME_LEAVE */
	mfsprg1	%r1
	b	realtrap
dbleave:
	FRAME_LEAVE(PC_DBSAVE)
	rfid
ASEND(breakpoint)
959
/*
 * In case of KDB we want a separate trap catcher for it.
 *
 * Installed in the program-interrupt (breakpoint) vector.  User-mode
 * breakpoints are routed to generictrap like any other trap; kernel-mode
 * ones save scratch state in PC_DBSAVE and branch to dbtrap.
 */
	.globl	CNAME(dblow),CNAME(dbend)
	.p2align 3
CNAME(dblow):
	mtsprg1	%r1			/* save SP */
	mtsprg2	%r29			/* save r29 */
	mfcr	%r29			/* save CR in r29 */
	mfsrr1	%r1
	mtcr	%r1
	bf	17,1f			/* branch if privileged */

	/* Unprivileged case */
	mtcr	%r29			/* put the condition register back */
	mfsprg2	%r29			/* ... and r29 */
	mflr	%r1			/* save LR */
	mtsprg2	%r1			/* And then in SPRG2 */

	ld	%r1, TRAP_ENTRY(0)	/* Get branch address */
	mtlr	%r1
	li	%r1, 0			/* How to get the vector from LR */
	blrl				/* Branch to generictrap */
	/* No fallthrough */
1:
	/* Privileged case: hand off to the KDB path */
	GET_CPUINFO(%r1)
	std	%r27,(PC_DBSAVE+CPUSAVE_R27)(%r1)	/* free r27 */
	std	%r28,(PC_DBSAVE+CPUSAVE_R28)(%r1)	/* free r28 */
	mfsprg2	%r28			/* r29 holds cr... */
	std	%r28,(PC_DBSAVE+CPUSAVE_R29)(%r1)	/* free r29 */
	std	%r30,(PC_DBSAVE+CPUSAVE_R30)(%r1)	/* free r30 */
	std	%r31,(PC_DBSAVE+CPUSAVE_R31)(%r1)	/* free r31 */
	mflr	%r28			/* save LR */
	ld	%r1,TRAP_GENTRAP(0)
	addi	%r1,%r1,(dbtrap-generictrap)
	mtlr	%r1
	blrl				/* Branch to dbtrap */
CNAME(dbend):
#endif /* KDB */
Cache object: 2792ecce2aef31e26f4329d2bb71928f
|