1 /* $OpenBSD: locore.S,v 1.18 1998/09/15 10:58:53 pefo Exp $ */
2 /*-
3 * Copyright (c) 1992, 1993
4 * The Regents of the University of California. All rights reserved.
5 *
6 * This code is derived from software contributed to Berkeley by
7 * Digital Equipment Corporation and Ralph Campbell.
8 *
9 * Redistribution and use in source and binary forms, with or without
10 * modification, are permitted provided that the following conditions
11 * are met:
12 * 1. Redistributions of source code must retain the above copyright
13 * notice, this list of conditions and the following disclaimer.
14 * 2. Redistributions in binary form must reproduce the above copyright
15 * notice, this list of conditions and the following disclaimer in the
16 * documentation and/or other materials provided with the distribution.
17 * 4. Neither the name of the University nor the names of its contributors
18 * may be used to endorse or promote products derived from this software
19 * without specific prior written permission.
20 *
21 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
22 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
23 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
24 * ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
25 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
26 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
27 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
28 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
29 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
30 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
31 * SUCH DAMAGE.
32 *
33 * Copyright (C) 1989 Digital Equipment Corporation.
34 * Permission to use, copy, modify, and distribute this software and
35 * its documentation for any purpose and without fee is hereby granted,
36 * provided that the above copyright notice appears in all copies.
37 * Digital Equipment Corporation makes no representations about the
38 * suitability of this software for any purpose. It is provided "as is"
39 * without express or implied warranty.
40 *
41 * from: Header: /sprite/src/kernel/mach/ds3100.md/RCS/loMem.s,
42 * v 1.1 89/07/11 17:55:04 nelson Exp SPRITE (DECWRL)
43 * from: Header: /sprite/src/kernel/mach/ds3100.md/RCS/machAsm.s,
44 * v 9.2 90/01/29 18:00:39 shirriff Exp SPRITE (DECWRL)
45 * from: Header: /sprite/src/kernel/vm/ds3100.md/vmPmaxAsm.s,
46 * v 1.1 89/07/10 14:27:41 nelson Exp SPRITE (DECWRL)
47 * from: @(#)locore.s 8.5 (Berkeley) 1/4/94
48 * JNPR: exception.S,v 1.5 2007/01/08 04:58:37 katta
49 * $FreeBSD: releng/8.0/sys/mips/mips/exception.S 178172 2008-04-13 07:27:37Z imp $
50 */
51
52 /*
53 * Contains the code that is executed first at boot time, plus
54 * assembly language support routines.
55 */
56
57 #include "opt_cputype.h"
58 #include "opt_ddb.h"
59 #include <machine/asm.h>
60 #include <machine/cpu.h>
61 #include <machine/regnum.h>
62 #include <machine/cpuregs.h>
63 #include <machine/pte.h>
64
65 #include "assym.s"
66
/*
 * Select 32-bit vs. 64-bit coprocessor-0 access width from the
 * configured ISA.  MIPS3 and MIPS64 use the 64-bit CP0 forms.
 */
67 #if defined(ISA_MIPS32)
68 #undef WITH_64BIT_CP0
69 #elif defined(ISA_MIPS64)
70 #define WITH_64BIT_CP0
71 #elif defined(ISA_MIPS3)
72 #define WITH_64BIT_CP0
73 #else
74 #error "Please write the code for this ISA"
75 #endif
76
/*
 * _SLL/_SRL/_MFC0/_MTC0 are the natural-width shift and CP0 move
 * instructions for the selected CP0 width.  WIRED_SHIFT is the
 * left-then-right shift pair used below to strip the software bits
 * from the top of a PTE before writing it to TLB_LO0/TLB_LO1.
 */
77 #ifdef WITH_64BIT_CP0
78 #define _SLL dsll
79 #define _SRL dsrl
80 #define _MFC0 dmfc0
81 #define _MTC0 dmtc0
82 #define WIRED_SHIFT 34
83 #else
84 #define _SLL sll
85 #define _SRL srl
86 #define _MFC0 mfc0
87 #define _MTC0 mtc0
88 #define WIRED_SHIFT 2
89 #endif
90 .set noreorder # Noreorder is default style!
91 #if defined(ISA_MIPS32)
92 .set mips32
93 #elif defined(ISA_MIPS64)
94 .set mips64
95 #elif defined(ISA_MIPS3)
96 .set mips3
97 #endif
98
99 /*
100 * Assume that we always need nops to escape the CP0 hazard.
101 * TODO: Make hazard delays configurable. Stuck with 5 cycles at the moment.
102 * For more info on CP0 hazards see Chapter 7 (p.99) of "MIPS32 Architecture
103 * For Programmers Volume III: The MIPS32 Privileged Resource Architecture"
104 */
105 #define ITLBNOPFIX nop;nop;nop;nop;nop;nop;nop;nop;nop;nop;
106 #define HAZARD_DELAY nop;nop;nop;nop;nop;
107
108 /*
109 *----------------------------------------------------------------------------
110 *
111 * MipsTLBMiss --
112 *
113 * Vector code for the TLB-miss exception vector 0x80000000.
114 *
115 * This code is copied to the TLB exception vector address to
116 * which the CPU jumps in response to an exception or a TLB miss.
117 * NOTE: This code must be position independent!!!
118 *
119 *
120 */
121
122 .set noat
123 VECTOR(MipsTLBMiss, unknown)
/*
 * Jump to the common refill handler; the BadVAddr fetch executes in the
 * branch delay slot, so k0 holds the faulting VA on arrival.
 */
124 j _C_LABEL(MipsDoTLBMiss)
125 mfc0 k0, COP_0_BAD_VADDR # get the fault address (delay slot)
126 nop
127 VECTOR_END(MipsTLBMiss)
128
129 /*
130 *----------------------------------------------------------------------------
131 *
132 * MipsDoTLBMiss --
133 *
134 * This is the real TLB Miss Handler code.
135 * 'segbase' points to the base of the segment table for user processes.
136 *
137 * Don't check for invalid pte's here. We load them as well and
138 * let the processor trap to load the correct value after service.
139 *----------------------------------------------------------------------------
140 */
/*
 * Common TLB refill handler.  On entry k0 holds BadVAddr (loaded in the
 * vector's delay slot).  User addresses (sign bit clear) are resolved
 * through the two-level segment table hanging off the per-CPU segbase;
 * kernel addresses (k0 < 0) go to MipsTLBMissException, and a null
 * segbase or segment entry goes to SlowFault.  Uses only k0/k1; no
 * state is saved, and the pair of PTEs for the even/odd pages is
 * written with tlbwr.
 */
141 MipsDoTLBMiss:
142 #ifndef SMP
143 lui k1, %hi(_C_LABEL(pcpup))
144 #endif
145 #k0 already has BadVA
146 bltz k0, 1f #02: k0<0 -> 1f (kernel fault)
147 srl k0, k0, SEGSHIFT - 2 #03: k0=seg offset (almost)
148 #ifdef SMP
149 GET_CPU_PCPU(k1)
150 #else
151 lw k1, %lo(_C_LABEL(pcpup))(k1)
152 #endif
153 lw k1, PC_SEGBASE(k1)
154 beqz k1, 2f #05: make sure segbase is not null
155 andi k0, k0, 0x7fc #06: k0=seg offset (clear low 2 bits)
156 addu k1, k0, k1 #07: k1=seg entry address
157 lw k1, 0(k1) #08: k1=seg entry
158 mfc0 k0, COP_0_BAD_VADDR #09: k0=bad address (again)
159 beq k1, zero, 2f #0a: ==0 -- no page table
160 srl k0, PGSHIFT - 2 #0b: k0=VPN (aka va>>10)
161
162 andi k0, k0, ((NPTEPG/2) - 1) << 3 #0c: k0=page tab offset
163 addu k1, k1, k0 #0d: k1=pte address
164 lw k0, 0(k1) #0e: k0=lo0 pte
165 lw k1, 4(k1) #0f: k1=lo1 pte
166 _SLL k0, k0, WIRED_SHIFT #10: keep bottom 30 bits
167 _SRL k0, k0, WIRED_SHIFT #11: keep bottom 30 bits
168 _MTC0 k0, COP_0_TLB_LO0 #12: lo0 is loaded
169 _SLL k1, k1, WIRED_SHIFT #13: keep bottom 30 bits
170 _SRL k1, k1, WIRED_SHIFT #14: keep bottom 30 bits
171 _MTC0 k1, COP_0_TLB_LO1 #15: lo1 is loaded
172 HAZARD_DELAY
173 tlbwr #1a: write to tlb
174 HAZARD_DELAY
175 eret #1f: return from exception
176 1: j _C_LABEL(MipsTLBMissException) #20: kernel exception
177 nop #21: branch delay slot
178 2: j SlowFault #22: no page table present
179 nop #23: branch delay slot
180
181 .set at
182
183 /*
184 * This code is copied to the general exception vector address to
185 * handle all exceptions except RESET and TLBMiss.
186 * NOTE: This code must be position independent!!!
187 */
/*
 * General exception vector.  Dispatches through machExceptionTable,
 * indexed by the exception code from the cause register; the user-mode
 * bit of the status register is shifted in so user-mode exceptions
 * select the second half of the table.
 */
188 VECTOR(MipsException, unknown)
189 /*
190 * Find out what mode we came from and jump to the proper handler.
191 */
192 .set noat
193 mfc0 k0, COP_0_STATUS_REG # Get the status register
194 mfc0 k1, COP_0_CAUSE_REG # Get the cause register value.
195 and k0, k0, SR_KSU_USER # test for user mode
196 # sneaky but the bits are
197 # with us........
198 sll k0, k0, 3 # shift user bit for cause index
199 and k1, k1, CR_EXC_CODE # Mask out the cause bits.
200 or k1, k1, k0 # change index to user table
201 1:
202 la k0, _C_LABEL(machExceptionTable) # get base of the jump table
203 addu k0, k0, k1 # Get the address of the
204 # function entry. Note that
205 # the cause is already
206 # shifted left by 2 bits so
207 # we dont have to shift.
208 lw k0, 0(k0) # Get the function address
209 nop
210 j k0 # Jump to the function.
211 nop
212 .set at
213 VECTOR_END(MipsException)
214
215 /*
216 * We couldn't find a TLB entry.
217 * Find out what mode we came from and call the appropriate handler.
218 */
/*
 * TLB refill fallback when no segment/page table was found: re-dispatch
 * by the mode we faulted from.  User-mode faults branch to
 * MipsUserGenException; kernel-mode faults fall through into
 * MipsKernGenException below.
 */
219 SlowFault:
220 .set noat
221 mfc0 k0, COP_0_STATUS_REG
222 nop
223 and k0, k0, SR_KSU_USER
224 bne k0, zero, _C_LABEL(MipsUserGenException)
225 nop
226 .set at
227 /*
228 * Fall through ...
229 */
230
231 /*----------------------------------------------------------------------------
232 *
233 * MipsKernGenException --
234 *
235 * Handle an exception from kernel mode.
236 *
237 * Results:
238 * None.
239 *
240 * Side effects:
241 * None.
242 *
243 *----------------------------------------------------------------------------
244 */
/*
 * Register save/load primitives sized for the configured ISA.
 */
245 #if defined(ISA_MIPS32)
246 #define STORE sw /* 32 bit mode regsave instruction */
247 #define LOAD lw /* 32 bit mode regload instruction */
248 #define RSIZE 4 /* 32 bit mode register size */
249 #elif defined(ISA_MIPS64)
250 #define STORE sd /* 64 bit mode regsave instruction */
251 #define LOAD ld /* 64 bit mode regload instruction */
252 #define RSIZE 8 /* 64 bit mode register size */
253 #else
254 #error "Please write code for this isa."
255 #endif
256
/*
 * Store register 'reg' into frame slot 'offs' at 'base'; the slots sit
 * above the standard argument-save area of the frame.
 */
257 #define SAVE_REG(reg, offs, base) \
258 STORE reg, STAND_ARG_SIZE + (RSIZE * offs) (base)
259
/*
 * Mask interrupts and clear the exception level in the status register
 * (a0/a2 are scratch).  The Octeon variant additionally forces the
 * KX/SX/UX 64-bit address-space enables on.
 */
260 #ifdef TARGET_OCTEON
261 #define CLEAR_STATUS \
262 mfc0 a0, COP_0_STATUS_REG ;\
263 li a2, (MIPS_SR_KX | MIPS_SR_SX | MIPS_SR_UX) ; \
264 or a0, a0, a2 ; \
265 li a2, ~(MIPS_SR_INT_IE|MIPS_SR_EXL) ; \
266 and a0, a0, a2 ; \
267 mtc0 a0, COP_0_STATUS_REG
268 #else
269 #define CLEAR_STATUS \
270 mfc0 a0, COP_0_STATUS_REG ;\
271 li a2, ~(MIPS_SR_INT_IE|MIPS_SR_EXL) ; \
272 and a0, a0, a2 ; \
273 mtc0 a0, COP_0_STATUS_REG
274 #endif
275
/*
 * SAVE_CPU: spill the full general-register set plus HI/LO and the CP0
 * status/cause/badvaddr/epc registers into the kernel exception frame
 * at sp, record the pre-exception sp (sp + KERN_EXC_FRAME_SIZE), then
 * mask interrupts via CLEAR_STATUS and leave a0 pointing at the saved
 * frame for the C handler.  v0/v1/a0-a3 are used as scratch only after
 * their original values have been stored.
 */
276 #define SAVE_CPU \
277 SAVE_REG(AT, AST, sp) ;\
278 .set at ; \
279 SAVE_REG(v0, V0, sp) ;\
280 SAVE_REG(v1, V1, sp) ;\
281 SAVE_REG(a0, A0, sp) ;\
282 SAVE_REG(a1, A1, sp) ;\
283 SAVE_REG(a2, A2, sp) ;\
284 SAVE_REG(a3, A3, sp) ;\
285 SAVE_REG(t0, T0, sp) ;\
286 SAVE_REG(t1, T1, sp) ;\
287 SAVE_REG(t2, T2, sp) ;\
288 SAVE_REG(t3, T3, sp) ;\
289 SAVE_REG(t4, T4, sp) ;\
290 SAVE_REG(t5, T5, sp) ;\
291 SAVE_REG(t6, T6, sp) ;\
292 SAVE_REG(t7, T7, sp) ;\
293 SAVE_REG(t8, T8, sp) ;\
294 SAVE_REG(t9, T9, sp) ;\
295 SAVE_REG(gp, GP, sp) ;\
296 SAVE_REG(s0, S0, sp) ;\
297 SAVE_REG(s1, S1, sp) ;\
298 SAVE_REG(s2, S2, sp) ;\
299 SAVE_REG(s3, S3, sp) ;\
300 SAVE_REG(s4, S4, sp) ;\
301 SAVE_REG(s5, S5, sp) ;\
302 SAVE_REG(s6, S6, sp) ;\
303 SAVE_REG(s7, S7, sp) ;\
304 SAVE_REG(s8, S8, sp) ;\
305 mflo v0 ;\
306 mfhi v1 ;\
307 mfc0 a0, COP_0_STATUS_REG ;\
308 mfc0 a1, COP_0_CAUSE_REG ;\
309 mfc0 a2, COP_0_BAD_VADDR ;\
310 mfc0 a3, COP_0_EXC_PC ;\
311 SAVE_REG(v0, MULLO, sp) ;\
312 SAVE_REG(v1, MULHI, sp) ;\
313 SAVE_REG(a0, SR, sp) ;\
314 SAVE_REG(a1, CAUSE, sp) ;\
315 SAVE_REG(ra, RA, sp) ;\
316 SAVE_REG(a2, BADVADDR, sp) ;\
317 SAVE_REG(a3, PC, sp) ;\
318 addu v0, sp, KERN_EXC_FRAME_SIZE ;\
319 SAVE_REG(v0, SP, sp) ;\
320 CLEAR_STATUS ;\
321 addu a0, sp, STAND_ARG_SIZE ;\
322 ITLBNOPFIX
323
/*
 * Load register 'reg' back from frame slot 'offs' at 'base'.
 */
324 #define RESTORE_REG(reg, offs, base) \
325 LOAD reg, STAND_ARG_SIZE + (RSIZE * offs) (base)
326
/*
 * RESTORE_CPU: inverse of SAVE_CPU.  Expects v0 to hold the return EPC
 * (the caller sets it before invoking the macro).  The status register
 * is first zeroed then restored from the frame, HI/LO and all general
 * registers follow, and finally sp is popped.  AT is restored last,
 * under .set noat.
 */
327 #define RESTORE_CPU \
328 mtc0 zero,COP_0_STATUS_REG ;\
329 RESTORE_REG(a0, SR, sp) ;\
330 RESTORE_REG(t0, MULLO, sp) ;\
331 RESTORE_REG(t1, MULHI, sp) ;\
332 mtc0 a0, COP_0_STATUS_REG ;\
333 mtlo t0 ;\
334 mthi t1 ;\
335 _MTC0 v0, COP_0_EXC_PC ;\
336 .set noat ; \
337 RESTORE_REG(AT, AST, sp) ;\
338 RESTORE_REG(v0, V0, sp) ;\
339 RESTORE_REG(v1, V1, sp) ;\
340 RESTORE_REG(a0, A0, sp) ;\
341 RESTORE_REG(a1, A1, sp) ;\
342 RESTORE_REG(a2, A2, sp) ;\
343 RESTORE_REG(a3, A3, sp) ;\
344 RESTORE_REG(t0, T0, sp) ;\
345 RESTORE_REG(t1, T1, sp) ;\
346 RESTORE_REG(t2, T2, sp) ;\
347 RESTORE_REG(t3, T3, sp) ;\
348 RESTORE_REG(t4, T4, sp) ;\
349 RESTORE_REG(t5, T5, sp) ;\
350 RESTORE_REG(t6, T6, sp) ;\
351 RESTORE_REG(t7, T7, sp) ;\
352 RESTORE_REG(t8, T8, sp) ;\
353 RESTORE_REG(t9, T9, sp) ;\
354 RESTORE_REG(s0, S0, sp) ;\
355 RESTORE_REG(s1, S1, sp) ;\
356 RESTORE_REG(s2, S2, sp) ;\
357 RESTORE_REG(s3, S3, sp) ;\
358 RESTORE_REG(s4, S4, sp) ;\
359 RESTORE_REG(s5, S5, sp) ;\
360 RESTORE_REG(s6, S6, sp) ;\
361 RESTORE_REG(s7, S7, sp) ;\
362 RESTORE_REG(s8, S8, sp) ;\
363 RESTORE_REG(gp, GP, sp) ;\
364 RESTORE_REG(ra, RA, sp) ;\
365 addu sp, sp, KERN_EXC_FRAME_SIZE
366
367
368 /*
369 * The kernel exception stack contains 18 saved general registers,
370 * the status register and the multiply lo and high registers.
371 * In addition, we set this up for linkage conventions.
372 */
373 #define KERN_REG_SIZE (NUMSAVEREGS * RSIZE)
374 #define KERN_EXC_FRAME_SIZE (STAND_FRAME_SIZE + KERN_REG_SIZE + 16)
375
/*
 * Kernel-mode general exception: build a full exception frame on the
 * kernel stack, call trap(), and resume at the PC trap() returns in v0
 * (consumed by RESTORE_CPU's _MTC0 to EXC_PC).
 */
376 NNON_LEAF(MipsKernGenException, KERN_EXC_FRAME_SIZE, ra)
377 .set noat
378 subu sp, sp, KERN_EXC_FRAME_SIZE
379 .mask 0x80000000, (STAND_RA_OFFSET - KERN_EXC_FRAME_SIZE)
380 /*
381 * Save CPU state, building 'frame'.
382 */
383 SAVE_CPU
384 /*
385 * Call the exception handler. a0 points at the saved frame.
386 */
387 la gp, _C_LABEL(_gp)
388 la k0, _C_LABEL(trap)
389 jalr k0
390 sw a3, STAND_RA_OFFSET + KERN_REG_SIZE(sp) # for debugging (delay slot: stash saved EPC)
391
392 RESTORE_CPU # v0 contains the return address.
393 sync
394 eret
395 .set at
396 END(MipsKernGenException)
397
398
/*
 * Store/load register 'reg' to/from slot 'offs' of the register save
 * area embedded in the PCB at 'base' (U_PCB_REGS is the area's offset
 * within the PCB).
 */
399 #define SAVE_U_PCB_REG(reg, offs, base) \
400 STORE reg, U_PCB_REGS + (RSIZE * offs) (base)
401
402 #define RESTORE_U_PCB_REG(reg, offs, base) \
403 LOAD reg, U_PCB_REGS + (RSIZE * offs) (base)
404
405 /*----------------------------------------------------------------------------
406 *
407 * MipsUserGenException --
408 *
409 * Handle an exception from user mode.
410 *
411 * Results:
412 * None.
413 *
414 * Side effects:
415 * None.
416 *
417 *----------------------------------------------------------------------------
418 */
/*
 * User-mode general exception: save the complete user register state
 * into the current PCB (interleaving CP0/HI/LO reads with the stores to
 * hide latency), switch to the kernel stack and GP, call trap(), run
 * AST processing, then restore user state from the PCB and eret.
 * k1 holds the PCB pointer throughout the save and restore sequences.
 */
419 NNON_LEAF(MipsUserGenException, STAND_FRAME_SIZE, ra)
420 .set noat
421 .mask 0x80000000, (STAND_RA_OFFSET - STAND_FRAME_SIZE)
422 /*
423 * Save all of the registers except for the kernel temporaries in u.u_pcb.
424 */
425 GET_CPU_PCPU(k1)
426 lw k1, PC_CURPCB(k1)
427 SAVE_U_PCB_REG(AT, AST, k1)
428 .set at
429 SAVE_U_PCB_REG(v0, V0, k1)
430 SAVE_U_PCB_REG(v1, V1, k1)
431 SAVE_U_PCB_REG(a0, A0, k1)
432 mflo v0
433 SAVE_U_PCB_REG(a1, A1, k1)
434 SAVE_U_PCB_REG(a2, A2, k1)
435 SAVE_U_PCB_REG(a3, A3, k1)
436 SAVE_U_PCB_REG(t0, T0, k1)
437 mfhi v1
438 SAVE_U_PCB_REG(t1, T1, k1)
439 SAVE_U_PCB_REG(t2, T2, k1)
440 SAVE_U_PCB_REG(t3, T3, k1)
441 SAVE_U_PCB_REG(t4, T4, k1)
442 mfc0 a0, COP_0_STATUS_REG # First arg is the status reg.
443 SAVE_U_PCB_REG(t5, T5, k1)
444 SAVE_U_PCB_REG(t6, T6, k1)
445 SAVE_U_PCB_REG(t7, T7, k1)
446 SAVE_U_PCB_REG(s0, S0, k1)
447 mfc0 a1, COP_0_CAUSE_REG # Second arg is the cause reg.
448 SAVE_U_PCB_REG(s1, S1, k1)
449 SAVE_U_PCB_REG(s2, S2, k1)
450 SAVE_U_PCB_REG(s3, S3, k1)
451 SAVE_U_PCB_REG(s4, S4, k1)
452 mfc0 a2, COP_0_BAD_VADDR # Third arg is the fault addr
453 SAVE_U_PCB_REG(s5, S5, k1)
454 SAVE_U_PCB_REG(s6, S6, k1)
455 SAVE_U_PCB_REG(s7, S7, k1)
456 SAVE_U_PCB_REG(t8, T8, k1)
457 mfc0 a3, COP_0_EXC_PC # Fourth arg is the pc.
458 SAVE_U_PCB_REG(t9, T9, k1)
459 SAVE_U_PCB_REG(gp, GP, k1)
460 SAVE_U_PCB_REG(sp, SP, k1)
461 SAVE_U_PCB_REG(s8, S8, k1)
462 subu sp, k1, STAND_FRAME_SIZE # switch to kernel SP
463 SAVE_U_PCB_REG(ra, RA, k1)
464 SAVE_U_PCB_REG(v0, MULLO, k1)
465 SAVE_U_PCB_REG(v1, MULHI, k1)
466 SAVE_U_PCB_REG(a0, SR, k1)
467 SAVE_U_PCB_REG(a1, CAUSE, k1)
468 SAVE_U_PCB_REG(a2, BADVADDR, k1)
469 SAVE_U_PCB_REG(a3, PC, k1)
470 sw a3, STAND_RA_OFFSET(sp) # for debugging
471 la gp, _C_LABEL(_gp) # switch to kernel GP
472 # Turn off fpu and enter kernel mode
473 and t0, a0, ~(SR_COP_1_BIT | SR_EXL | SR_KSU_MASK | SR_INT_ENAB)
474 #ifdef TARGET_OCTEON
475 or t0, t0, (MIPS_SR_KX | MIPS_SR_SX | MIPS_SR_UX)
476 #endif
477 mtc0 t0, COP_0_STATUS_REG
478 addu a0, k1, U_PCB_REGS
479 ITLBNOPFIX
480
481 /*
482 * Call the exception handler.
483 */
484 la k0, _C_LABEL(trap)
485 jalr k0
486 nop
487 /*
488 * Restore user registers and return.
489 * First disable interrupts and set exception level.
490 */
491 DO_AST
492
493 mtc0 zero, COP_0_STATUS_REG # disable int
494 ITLBNOPFIX
495 li v0, SR_EXL
496 mtc0 v0, COP_0_STATUS_REG # set exception level
497 ITLBNOPFIX
498
499 /*
500 * The use of k1 for storing the PCB pointer must be done only
501 * after interrupts are disabled. Otherwise it will get overwritten
502 * by the interrupt code.
503 */
504 GET_CPU_PCPU(k1)
505 lw k1, PC_CURPCB(k1)
506
507 RESTORE_U_PCB_REG(t0, MULLO, k1)
508 RESTORE_U_PCB_REG(t1, MULHI, k1)
509 mtlo t0
510 mthi t1
511 RESTORE_U_PCB_REG(a0, PC, k1)
512 RESTORE_U_PCB_REG(v0, V0, k1)
513 _MTC0 a0, COP_0_EXC_PC # set return address
514 RESTORE_U_PCB_REG(v1, V1, k1)
515 RESTORE_U_PCB_REG(a0, A0, k1)
516 RESTORE_U_PCB_REG(a1, A1, k1)
517 RESTORE_U_PCB_REG(a2, A2, k1)
518 RESTORE_U_PCB_REG(a3, A3, k1)
519 RESTORE_U_PCB_REG(t0, T0, k1)
520 RESTORE_U_PCB_REG(t1, T1, k1)
521 RESTORE_U_PCB_REG(t2, T2, k1)
522 RESTORE_U_PCB_REG(t3, T3, k1)
523 RESTORE_U_PCB_REG(t4, T4, k1)
524 RESTORE_U_PCB_REG(t5, T5, k1)
525 RESTORE_U_PCB_REG(t6, T6, k1)
526 RESTORE_U_PCB_REG(t7, T7, k1)
527 RESTORE_U_PCB_REG(s0, S0, k1)
528 RESTORE_U_PCB_REG(s1, S1, k1)
529 RESTORE_U_PCB_REG(s2, S2, k1)
530 RESTORE_U_PCB_REG(s3, S3, k1)
531 RESTORE_U_PCB_REG(s4, S4, k1)
532 RESTORE_U_PCB_REG(s5, S5, k1)
533 RESTORE_U_PCB_REG(s6, S6, k1)
534 RESTORE_U_PCB_REG(s7, S7, k1)
535 RESTORE_U_PCB_REG(t8, T8, k1)
536 RESTORE_U_PCB_REG(t9, T9, k1)
537 RESTORE_U_PCB_REG(gp, GP, k1)
538 RESTORE_U_PCB_REG(sp, SP, k1)
539 RESTORE_U_PCB_REG(k0, SR, k1)
540 RESTORE_U_PCB_REG(s8, S8, k1)
541 RESTORE_U_PCB_REG(ra, RA, k1)
542 #ifdef TARGET_OCTEON
543 and k0, k0, ~(MIPS_SR_KX | MIPS_SR_SX | MIPS_SR_UX)
544 #endif
545 or k0, k0, (MIPS_SR_INT_IE)
546 .set noat
547 RESTORE_U_PCB_REG(AT, AST, k1)
548
549 /*
550 * The restoration of the user SR must be done only after
551 * k1 is no longer needed. Otherwise, k1 will get clobbered after
552 * interrupts are enabled.
553 */
554 mtc0 k0, COP_0_STATUS_REG # still at exception level
555 ITLBNOPFIX
556 sync
557 eret
558 .set at
559 END(MipsUserGenException)
560
561 /*----------------------------------------------------------------------------
562 *
563 * MipsKernIntr --
564 *
565 * Handle an interrupt from kernel mode.
566 * Interrupts use the standard kernel stack.
567 * switch_exit sets up a kernel stack after exit so interrupts won't fail.
568 *
569 * Results:
570 * None.
571 *
572 * Side effects:
573 * None.
574 *
575 *----------------------------------------------------------------------------
576 */
577
/*
 * Kernel-mode interrupt: save full state onto the kernel stack, call
 * cpu_intr() with a0 pointing at the saved frame, then reload the saved
 * EPC into v0 (consumed by RESTORE_CPU) and eret.
 */
578 NNON_LEAF(MipsKernIntr, KERN_EXC_FRAME_SIZE, ra)
579 .set noat
580 subu sp, sp, KERN_EXC_FRAME_SIZE
581 .mask 0x80000000, (STAND_RA_OFFSET - KERN_EXC_FRAME_SIZE)
582 /*
583 * Save the relevant kernel registers onto the stack.
584 */
585 SAVE_CPU
586
587 /*
588 * Call the interrupt handler.
589 */
590 la gp, _C_LABEL(_gp)
591 addu a0, sp, STAND_ARG_SIZE
592 la k0, _C_LABEL(cpu_intr)
593 jalr k0
594 sw a3, STAND_RA_OFFSET + KERN_REG_SIZE(sp) # delay slot: stash saved EPC (a3)
595 /* Why no AST processing here? */
596 /*
597 * Restore registers and return from the interrupt.
598 */
599 lw v0, STAND_RA_OFFSET + KERN_REG_SIZE(sp) # v0 = EPC for RESTORE_CPU
600 RESTORE_CPU
601 sync
602 eret
603 .set at
604 END(MipsKernIntr)
605
606 /*----------------------------------------------------------------------------
607 *
608 * MipsUserIntr --
609 *
610 * Handle an interrupt from user mode.
611 * Note: we save minimal state in the u.u_pcb struct and use the standard
612 * kernel stack since there has to be a u page if we came from user mode.
613 * If there is a pending software interrupt, then save the remaining state
614 * and call softintr(). This is all because if we call switch() inside
615 * interrupt(), not all the user registers have been saved in u.u_pcb.
616 *
617 * Results:
618 * None.
619 *
620 * Side effects:
621 * None.
622 *
623 *----------------------------------------------------------------------------
624 */
/*
 * User-mode interrupt: save the full user register state into the
 * current PCB, switch to the kernel stack/GP, call cpu_intr(), run AST
 * processing, then restore user state from the PCB and eret.
 *
 * Fix: the PCB pointer for the restore path was loaded into a1
 * ("lw a1, PC_CURPCB(k1)") while every RESTORE_U_PCB_REG below uses k1
 * as its base register, so the restores read from the pcpu structure
 * instead of the PCB (and a1 is itself clobbered mid-sequence).  Load
 * the PCB pointer into k1, exactly as MipsUserGenException does; this
 * is safe because interrupts are re-disabled (mtc0 zero, then SR_EXL)
 * just above, so k1 cannot be clobbered by a nested interrupt.
 */
625 NNON_LEAF(MipsUserIntr, STAND_FRAME_SIZE, ra)
626 .set noat
627 .mask 0x80000000, (STAND_RA_OFFSET - STAND_FRAME_SIZE)
628 /*
629 * Save the relevant user registers into the u.u_pcb struct.
630 * We don't need to save s0 - s8 because the compiler does it for us.
631 */
632 GET_CPU_PCPU(k1)
633 lw k1, PC_CURPCB(k1)
634 SAVE_U_PCB_REG(AT, AST, k1)
635 .set at
636 SAVE_U_PCB_REG(v0, V0, k1)
637 SAVE_U_PCB_REG(v1, V1, k1)
638 SAVE_U_PCB_REG(a0, A0, k1)
639 SAVE_U_PCB_REG(a1, A1, k1)
640 SAVE_U_PCB_REG(a2, A2, k1)
641 SAVE_U_PCB_REG(a3, A3, k1)
642 SAVE_U_PCB_REG(t0, T0, k1)
643 SAVE_U_PCB_REG(t1, T1, k1)
644 SAVE_U_PCB_REG(t2, T2, k1)
645 SAVE_U_PCB_REG(t3, T3, k1)
646 SAVE_U_PCB_REG(t4, T4, k1)
647 SAVE_U_PCB_REG(t5, T5, k1)
648 SAVE_U_PCB_REG(t6, T6, k1)
649 SAVE_U_PCB_REG(t7, T7, k1)
650 SAVE_U_PCB_REG(t8, T8, k1)
651 SAVE_U_PCB_REG(t9, T9, k1)
652 SAVE_U_PCB_REG(gp, GP, k1)
653 SAVE_U_PCB_REG(sp, SP, k1)
654 SAVE_U_PCB_REG(ra, RA, k1)
655 /*
656 * save remaining user state in u.u_pcb.
657 */
658 SAVE_U_PCB_REG(s0, S0, k1)
659 SAVE_U_PCB_REG(s1, S1, k1)
660 SAVE_U_PCB_REG(s2, S2, k1)
661 SAVE_U_PCB_REG(s3, S3, k1)
662 SAVE_U_PCB_REG(s4, S4, k1)
663 SAVE_U_PCB_REG(s5, S5, k1)
664 SAVE_U_PCB_REG(s6, S6, k1)
665 SAVE_U_PCB_REG(s7, S7, k1)
666 SAVE_U_PCB_REG(s8, S8, k1)
667
668 mflo v0 # get lo/hi late to avoid stall
669 mfhi v1
670 mfc0 a0, COP_0_STATUS_REG
671 mfc0 a1, COP_0_CAUSE_REG
672 mfc0 a3, COP_0_EXC_PC
673 SAVE_U_PCB_REG(v0, MULLO, k1)
674 SAVE_U_PCB_REG(v1, MULHI, k1)
675 SAVE_U_PCB_REG(a0, SR, k1)
676 SAVE_U_PCB_REG(a1, CAUSE, k1)
677 SAVE_U_PCB_REG(a3, PC, k1) # PC in a3, note used later!
678 subu sp, k1, STAND_FRAME_SIZE # switch to kernel SP
679 la gp, _C_LABEL(_gp) # switch to kernel GP
680
681 # Turn off fpu, disable interrupts, set kernel mode, clear exception level.
682 and t0, a0, ~(SR_COP_1_BIT | SR_EXL | SR_INT_ENAB | SR_KSU_MASK)
683 #ifdef TARGET_OCTEON
684 or t0, t0, (MIPS_SR_KX | MIPS_SR_SX | MIPS_SR_UX)
685 #endif
686 mtc0 t0, COP_0_STATUS_REG
687 ITLBNOPFIX
688 addu a0, k1, U_PCB_REGS
689 /*
690 * Call the interrupt handler; a0 points at the saved register frame
691 * in the PCB, and the saved EPC (still in a3) is stashed on the
692 * kernel stack in the delay slot for debugging.
693 */
694 la k0, _C_LABEL(cpu_intr)
695 jalr k0
696 sw a3, STAND_RA_OFFSET(sp) # for debugging
697 /*
698 * NOTE(review): whether cpu_intr can return with interrupts enabled
699 * is unclear (it does not appear to touch SR); either way the code
700 * below masks interrupts again before using k1 as the PCB base.
701 */
702 DO_AST
703
704 /*
705 * Restore user registers and return.  Interrupts are disabled and the
706 * exception level is set for the remainder of the path, which makes it
707 * safe to keep the PCB pointer in k1 (k0/k1 are only clobbered by
708 * exception entry, which cannot occur while masked).
709 */
710 mtc0 zero, COP_0_STATUS_REG
711 ITLBNOPFIX
712 li v0, SR_EXL
713 mtc0 v0, COP_0_STATUS_REG # set exception level bit.
714 ITLBNOPFIX
715
716 GET_CPU_PCPU(k1)
717 /*
718 * PCB base must land in k1 itself: every RESTORE_U_PCB_REG below uses
719 * k1 as the base register (a1 would be clobbered when A1 is restored).
720 */
721 lw k1, PC_CURPCB(k1)
722 RESTORE_U_PCB_REG(s0, S0, k1)
723 RESTORE_U_PCB_REG(s1, S1, k1)
724 RESTORE_U_PCB_REG(s2, S2, k1)
725 RESTORE_U_PCB_REG(s3, S3, k1)
726 RESTORE_U_PCB_REG(s4, S4, k1)
727 RESTORE_U_PCB_REG(s5, S5, k1)
728 RESTORE_U_PCB_REG(s6, S6, k1)
729 RESTORE_U_PCB_REG(s7, S7, k1)
730 RESTORE_U_PCB_REG(s8, S8, k1)
731 RESTORE_U_PCB_REG(t0, MULLO, k1)
732 RESTORE_U_PCB_REG(t1, MULHI, k1)
733 RESTORE_U_PCB_REG(t2, PC, k1)
734 mtlo t0
735 mthi t1
736 _MTC0 t2, COP_0_EXC_PC # set return address
737 RESTORE_U_PCB_REG(v0, V0, k1)
738 RESTORE_U_PCB_REG(v1, V1, k1)
739 RESTORE_U_PCB_REG(a0, A0, k1)
740 RESTORE_U_PCB_REG(a1, A1, k1)
741 RESTORE_U_PCB_REG(a2, A2, k1)
742 RESTORE_U_PCB_REG(a3, A3, k1)
743 RESTORE_U_PCB_REG(t0, T0, k1)
744 RESTORE_U_PCB_REG(t1, T1, k1)
745 RESTORE_U_PCB_REG(t2, T2, k1)
746 RESTORE_U_PCB_REG(t3, T3, k1)
747 RESTORE_U_PCB_REG(t4, T4, k1)
748 RESTORE_U_PCB_REG(t5, T5, k1)
749 RESTORE_U_PCB_REG(t6, T6, k1)
750 RESTORE_U_PCB_REG(t7, T7, k1)
751 RESTORE_U_PCB_REG(t8, T8, k1)
752 RESTORE_U_PCB_REG(t9, T9, k1)
753 RESTORE_U_PCB_REG(gp, GP, k1)
754 RESTORE_U_PCB_REG(k0, SR, k1)
755 RESTORE_U_PCB_REG(sp, SP, k1)
756 RESTORE_U_PCB_REG(ra, RA, k1)
757 #ifdef TARGET_OCTEON
758 and k0, k0, ~(MIPS_SR_KX | MIPS_SR_SX | MIPS_SR_UX)
759 #endif
760 or k0, k0, (MIPS_SR_INT_IE|SR_EXL)
761 .set noat
762 RESTORE_U_PCB_REG(AT, AST, k1)
763
764 /*
765 * The restoration of the user SR must be done only after
766 * k1 is no longer needed. Otherwise, k1 will get clobbered after
767 * interrupts are enabled.
768 */
769 mtc0 k0, COP_0_STATUS_REG # SR with EXL set.
770 ITLBNOPFIX
771 sync
772 eret
773 .set at
774 END(MipsUserIntr)
775
776 /*----------------------------------------------------------------------------
777 *
778 * MipsTLBInvalidException --
779 *
780 * Handle a TLB invalid exception.
781 * The BadVAddr, Context, and EntryHi registers contain the failed
782 * virtual address.
783 *
784 * Results:
785 * None.
786 *
787 * Side effects:
788 * None.
789 *
790 *----------------------------------------------------------------------------
791 */
/*
 * TLB-invalid exception dispatcher: user-mode faults branch to
 * MipsUserTLBInvalidException; kernel-mode faults fall through into
 * MipsKernTLBInvalidException below.
 */
792 NLEAF(MipsTLBInvalidException)
793 .set noat
794 mfc0 k0, COP_0_STATUS_REG
795 nop
796 and k0, k0, SR_KSU_USER
797 bne k0, zero, _C_LABEL(MipsUserTLBInvalidException)
798 nop
799 .set at
800 END(MipsTLBInvalidException)
801 /*
802 * Fall through ...
803 */
804
/*
 * Kernel-mode TLB-invalid handler.  Looks the faulting address up in
 * either the per-CPU user segment table (addresses below
 * VM_MAXUSER_ADDRESS) or kernel_segmap, probes for the stale TLB entry,
 * and rewrites the even/odd PTE pair with tlbwi.  Any lookup failure or
 * an invalid PTE escalates to MipsKernGenException; a probe miss or a
 * wired-entry index goes to sys_stk_chk to test for kernel stack
 * overflow.
 */
805 NLEAF(MipsKernTLBInvalidException)
806 .set noat
807 mfc0 k0, COP_0_BAD_VADDR # get the fault address
808
809
810 li k1, VM_MAXUSER_ADDRESS
811 sltu k1, k0, k1
812 beqz k1, 1f
813 nop
814 #ifdef SMP
815 GET_CPU_PCPU(k1)
816 #else
817 lui k1, %hi(_C_LABEL(pcpup))
818 lw k1, %lo(_C_LABEL(pcpup))(k1)
819 #endif
820 lw k1, PC_SEGBASE(k1) # works for single cpu????
821 beqz k1, _C_LABEL(MipsKernGenException) # seg tab is null
822 nop
823 b 2f
824 nop
825 1:
826 li k1, (VM_MAX_KERNEL_ADDRESS)
827 bgez k0, _C_LABEL(MipsKernGenException) # full trap processing
828 sltu k1, k1, k0 # check fault address against
829 bnez k1, _C_LABEL(MipsKernGenException) # kernel_segmap upper bound
830 lui k1, %hi(_C_LABEL(kernel_segmap)) # k1=hi of segbase
831 lw k1, %lo(_C_LABEL(kernel_segmap))(k1) # k1=segment tab base
832 beqz k1, _C_LABEL(MipsKernGenException) # seg tab is null
833 2:
834 srl k0, 20 # k0=seg offset (almost)
835 andi k0, k0, 0xffc # k0=seg offset (clear low 2 bits)
836 addu k1, k0, k1 # k1=seg entry address
837 lw k1, 0(k1) # k1=seg entry
838 mfc0 k0, COP_0_BAD_VADDR # k0=bad address (again)
839 beq k1, zero, _C_LABEL(MipsKernGenException) # ==0 -- no page table
840 srl k0, k0, PGSHIFT-2
841 andi k0, k0, 0xffc # compute offset from index
842 tlbp # Probe the invalid entry
843 addu k1, k1, k0
844 and k0, k0, 4 # check even/odd page
845 nop # required for QED 5230
846 bne k0, zero, KernTLBIOdd
847 nop
848
/* Even page: Index < 0 means the probe missed -> stack-overflow check. */
849 mfc0 k0, COP_0_TLB_INDEX
850 nop
851 bltz k0, sys_stk_chk
852
853 sltiu k0, k0, VMWIRED_ENTRIES # index below wired entries?
854 bne k0, zero, sys_stk_chk
855 lw k0, 0(k1) # get PTE entry
856
857 _SLL k0, k0, WIRED_SHIFT # get rid of "wired" bit
858 _SRL k0, k0, WIRED_SHIFT
859 _MTC0 k0, COP_0_TLB_LO0 # load PTE entry
860 and k0, k0, PTE_V # check for valid entry
861 nop # required for QED5230
862 beq k0, zero, _C_LABEL(MipsKernGenException) # PTE invalid
863 lw k0, 4(k1) # get odd PTE entry
864 _SLL k0, k0, WIRED_SHIFT
865 _SRL k0, k0, WIRED_SHIFT
866 _MTC0 k0, COP_0_TLB_LO1 # load PTE entry
867 HAZARD_DELAY
868 tlbwi # write TLB
869 HAZARD_DELAY
870 eret
871
/* Odd page: same as above with the LO0/LO1 roles swapped. */
872 KernTLBIOdd:
873 mfc0 k0, COP_0_TLB_INDEX
874 nop
875 bltz k0, sys_stk_chk
876
877 sltiu k0, k0, VMWIRED_ENTRIES # index below wired entries?
878 bne k0, zero, sys_stk_chk
879 lw k0, 0(k1) # get PTE entry
880
881 _SLL k0, k0, WIRED_SHIFT # get rid of wired bit
882 _SRL k0, k0, WIRED_SHIFT
883 _MTC0 k0, COP_0_TLB_LO1 # save PTE entry
884 and k0, k0, PTE_V # check for valid entry
885 nop # required for QED5230
886 beq k0, zero, _C_LABEL(MipsKernGenException) # PTE invalid
887 lw k0, -4(k1) # get even PTE entry
888 _SLL k0, k0, WIRED_SHIFT
889 _SRL k0, k0, WIRED_SHIFT
890 _MTC0 k0, COP_0_TLB_LO0 # save PTE entry
891 HAZARD_DELAY
892 tlbwi # update TLB
893 HAZARD_DELAY
894 eret
895
896 .set at
897 END(MipsKernTLBInvalidException)
898
899
/*
 * User-mode TLB-invalid handler.  Mirrors the kernel variant above but
 * only handles addresses below VM_MAXUSER_ADDRESS via the per-CPU user
 * segment table; every failure path (out-of-range address, null segment
 * table/page table, probe miss, wired index, invalid PTE) escalates to
 * MipsUserGenException for full trap processing.
 */
900 NLEAF(MipsUserTLBInvalidException)
901 .set noat
902 mfc0 k0, COP_0_BAD_VADDR # get the fault address
903
904 li k1, VM_MAXUSER_ADDRESS
905 sltu k1, k0, k1
906 beqz k1, _C_LABEL(MipsUserGenException)
907 nop
908 #ifdef SMP
909 GET_CPU_PCPU(k1)
910 #else
911 lui k1, %hi(_C_LABEL(pcpup))
912 lw k1, %lo(_C_LABEL(pcpup))(k1)
913 #endif
914 lw k1, PC_SEGBASE(k1) # works for single cpu????
915 beqz k1, _C_LABEL(MipsUserGenException) # seg tab is null
916 nop
917 2:
918 srl k0, 20 # k0=seg offset (almost)
919 andi k0, k0, 0xffc # k0=seg offset (clear low 2 bits)
920 addu k1, k0, k1 # k1=seg entry address
921 lw k1, 0(k1) # k1=seg entry
922 mfc0 k0, COP_0_BAD_VADDR # k0=bad address (again)
923 beq k1, zero, _C_LABEL(MipsUserGenException) # ==0 -- no page table
924 srl k0, k0, PGSHIFT-2
925 andi k0, k0, 0xffc # compute offset from index
926 tlbp # Probe the invalid entry
927 addu k1, k1, k0
928 and k0, k0, 4 # check even/odd page
929 nop # required for QED 5230
930 bne k0, zero, UserTLBIOdd
931 nop
932
/* Even page: Index < 0 means the probe missed -> full trap processing. */
933 mfc0 k0, COP_0_TLB_INDEX
934 nop
935 bltz k0, _C_LABEL(MipsUserGenException)
936
937 sltiu k0, k0, VMWIRED_ENTRIES # index below wired entries?
938 bne k0, zero, _C_LABEL(MipsUserGenException)
939 lw k0, 0(k1) # get PTE entry
940
941 _SLL k0, k0, WIRED_SHIFT # get rid of "wired" bit
942 _SRL k0, k0, WIRED_SHIFT
943 _MTC0 k0, COP_0_TLB_LO0 # load PTE entry
944 and k0, k0, PTE_V # check for valid entry
945 nop # required for QED5230
946 beq k0, zero, _C_LABEL(MipsUserGenException) # PTE invalid
947 lw k0, 4(k1) # get odd PTE entry
948 _SLL k0, k0, WIRED_SHIFT
949 _SRL k0, k0, WIRED_SHIFT
950 _MTC0 k0, COP_0_TLB_LO1 # load PTE entry
951 HAZARD_DELAY
952 tlbwi # write TLB
953 HAZARD_DELAY
954 eret
955
/* Odd page: same as above with the LO0/LO1 roles swapped. */
956 UserTLBIOdd:
957 mfc0 k0, COP_0_TLB_INDEX
958 nop
959 bltz k0, _C_LABEL(MipsUserGenException)
960 sltiu k0, k0, VMWIRED_ENTRIES # index below wired entries?
961
962 bne k0, zero, _C_LABEL(MipsUserGenException)
963 lw k0, 0(k1) # get PTE entry
964
965 _SLL k0, k0, WIRED_SHIFT # get rid of wired bit
966 _SRL k0, k0, WIRED_SHIFT
967 _MTC0 k0, COP_0_TLB_LO1 # save PTE entry
968 and k0, k0, PTE_V # check for valid entry
969 nop # required for QED5230
970 beq k0, zero, _C_LABEL(MipsUserGenException) # PTE invalid
971 lw k0, -4(k1) # get even PTE entry
972 _SLL k0, k0, WIRED_SHIFT
973 _SRL k0, k0, WIRED_SHIFT
974 _MTC0 k0, COP_0_TLB_LO0 # save PTE entry
975 HAZARD_DELAY
976 tlbwi # update TLB
977 HAZARD_DELAY
978 eret
979
980 .set at
981 END(MipsUserTLBInvalidException)
982
983 /*----------------------------------------------------------------------------
984 *
985 * MipsTLBMissException --
986 *
987 * Handle a TLB miss exception from kernel mode in kernel space.
988 * The BadVAddr, Context, and EntryHi registers contain the failed
989 * virtual address.
990 *
991 * Results:
992 * None.
993 *
994 * Side effects:
995 * None.
996 *
997 *----------------------------------------------------------------------------
998 */
/*
 * TLB miss on a kernel address (reached from MipsDoTLBMiss).  Resolves
 * the faulting address through kernel_segmap and refills the TLB with
 * tlbwr; any failure (address above VM_MAX_KERNEL_ADDRESS, null segment
 * table or page table) escalates to MipsKernGenException.  Uses only
 * k0/k1.
 */
999 NLEAF(MipsTLBMissException)
1000 .set noat
1001 mfc0 k0, COP_0_BAD_VADDR # k0=bad address
1002 li k1, (VM_MAX_KERNEL_ADDRESS) # check fault address against
1003 sltu k1, k1, k0 # upper bound of kernel_segmap
1004 bnez k1, _C_LABEL(MipsKernGenException) # out of bound
1005 lui k1, %hi(_C_LABEL(kernel_segmap)) # k1=hi of segbase
1006 srl k0, 20 # k0=seg offset (almost)
1007 lw k1, %lo(_C_LABEL(kernel_segmap))(k1) # k1=segment tab base
1008 beq k1, zero, _C_LABEL(MipsKernGenException) # ==0 -- no seg tab
1009 andi k0, k0, 0xffc # k0=seg offset (clear low 2 bits)
1010 addu k1, k0, k1 # k1=seg entry address
1011 lw k1, 0(k1) # k1=seg entry
1012 mfc0 k0, COP_0_BAD_VADDR # k0=bad address (again)
1013 beq k1, zero, _C_LABEL(MipsKernGenException) # ==0 -- no page table
1014 srl k0, 10 # k0=VPN (aka va>>10)
1015 andi k0, k0, 0xff8 # k0=page tab offset
1016 addu k1, k1, k0 # k1=pte address
1017 lw k0, 0(k1) # k0=lo0 pte
1018 lw k1, 4(k1) # k1=lo1 pte
1019 _SLL k0, WIRED_SHIFT # chop bits [31..30]
1020 _SRL k0, WIRED_SHIFT # chop bits [31..30]
1021 _MTC0 k0, COP_0_TLB_LO0 # lo0 is loaded
1022 _SLL k1, WIRED_SHIFT # chop bits [31..30]
1023 _SRL k1, WIRED_SHIFT # chop bits [31..30]
1024 _MTC0 k1, COP_0_TLB_LO1 # lo1 is loaded
1025
1026 HAZARD_DELAY
1027 tlbwr # write to tlb
1028 HAZARD_DELAY
1029 eret # return from exception
1030
/*
 * Kernel stack overflow check.  If sp is still above the thread's real
 * kernel stack the fault is handed to MipsKernGenException; otherwise
 * sp is moved to a scratch area near _start, diagnostic state is
 * printed (and on !SMP+DDB dumped via trapDump/db_dump_tlb), and the
 * kernel panics.
 */
1031 sys_stk_chk:
1032 GET_CPU_PCPU(k0)
1033 lw k0, PC_CURTHREAD(k0)
1034 lw k0, TD_REALKSTACK(k0)
1035 sltu k0, sp, k0 # check for stack overflow
1036 beqz k0, _C_LABEL(MipsKernGenException) # not stack overflow
1037 nop
1038
1039 # stack overflow
1040 la a0, _C_LABEL(_start) - START_FRAME - 8 # set sp to a valid place
1041 sw sp, 24(a0)
1042 move sp, a0
1043 la a0, 1f
1044 mfc0 a2, COP_0_STATUS_REG
1045 mfc0 a3, COP_0_CAUSE_REG
1046 _MFC0 a1, COP_0_EXC_PC
1047 sw a2, 16(sp)
1048 sw a3, 20(sp)
1049 move a2, ra
1050 la k0, _C_LABEL(printf)
1051 jalr k0
1052 mfc0 a3, COP_0_BAD_VADDR # delay slot: 4th printf arg
1053
1054 la sp, _C_LABEL(_start) - START_FRAME # set sp to a valid place
1055
1056 #if !defined(SMP) && defined(DDB)
1057 la a0, 2f
1058 la k0, _C_LABEL(trapDump)
1059 jalr k0
1060 nop
1061
1062 li a0, 0
1063 lw a1, _C_LABEL(num_tlbentries)
1064 la k0, _C_LABEL(db_dump_tlb)
1065 jalr k0
1066 addu a1, -1
1067
1068 3:
1069 b 3b
1070 nop
1071 #endif
1072
1073 PANIC("kernel stack overflow")
1074
1075 .data
1076 .globl lastktlbmiss
1077 lastktlbmiss:
1078 .word 0
1079 lastktlbmisspc:
1080 .word 0
1081 lastutlbmiss:
1082 .word 0
1083 lastutlbmisspc:
1084 .word 0
1085
1086 1:
1087 .asciiz "ktlbmiss: PC %x RA %x ADR %x\nSR %x CR %x SP %x\n"
1088 2:
1089 .asciiz "stack ovf"
1090 .text
1091
1092 .set at
1093 END(MipsTLBMissException)
1094
1095 /*----------------------------------------------------------------------------
1096 *
1097 * MipsFPTrap --
1098 *
1099 * Handle a floating point Trap.
1100 *
1101 * MipsFPTrap(statusReg, causeReg, pc)
1102 * unsigned statusReg;
1103 * unsigned causeReg;
1104 * unsigned pc;
1105 *
1106 * Results:
1107 * None.
1108 *
1109 * Side effects:
1110 * None.
1111 *
1112 *----------------------------------------------------------------------------
1113 */
NON_LEAF(MipsFPTrap, STAND_FRAME_SIZE, ra)
	subu	sp, sp, STAND_FRAME_SIZE
	mfc0	t0, COP_0_STATUS_REG
	sw	ra, STAND_RA_OFFSET(sp)
	.mask	0x80000000, (STAND_RA_OFFSET - STAND_FRAME_SIZE)

	or	t1, t0, SR_COP_1_BIT		# enable coprocessor 1 so the
	mtc0	t1, COP_0_STATUS_REG		# FP control regs are readable
	ITLBNOPFIX
	cfc1	t1, FPC_CSR			# stall til FP done
	cfc1	t1, FPC_CSR			# now get status
	nop
	sll	t2, t1, (31 - 17)		# unimplemented operation?
	bgez	t2, 3f				# no, normal trap
	nop					# (delay slot)
/*
 * We got an unimplemented operation trap so
 * fetch the instruction, compute the next PC and emulate the instruction.
 * (a0 = statusReg, a1 = causeReg, a2 = exception pc)
 */
	bgez	a1, 1f				# Check the branch delay bit
	nop					# (sign bit of the cause reg)
/*
 * The instruction is in the branch delay slot so the branch will have to
 * be emulated to get the resulting PC.
 */
	sw	a2, STAND_FRAME_SIZE + 8(sp)	# stash exception pc
	GET_CPU_PCPU(a0)
	lw	a0, PC_CURPCB(a0)
	addu	a0, a0, U_PCB_REGS		# first arg is ptr to CPU registers
	move	a1, a2				# second arg is instruction PC
	move	a2, t1				# third arg is floating point CSR
	la	t3, _C_LABEL(MipsEmulateBranch) # compute PC after branch
	jalr	t3				# v0 = PC after the branch
	move	a3, zero			# (delay slot) fourth arg is FALSE
/*
 * Now load the floating-point instruction in the branch delay slot
 * to be emulated.
 */
	lw	a2, STAND_FRAME_SIZE + 8(sp)	# restore EXC pc
	b	2f
	lw	a0, 4(a2)			# (delay slot) a0 = delay-slot insn
/*
 * This is not in the branch delay slot so calculate the resulting
 * PC (epc + 4) into v0 and continue to MipsEmulateFP().
 */
1:
	lw	a0, 0(a2)			# a0 = coproc instruction
	addu	v0, a2, 4			# v0 = next pc
2:
	GET_CPU_PCPU(t2)
	lw	t2, PC_CURPCB(t2)
	SAVE_U_PCB_REG(v0, PC, t2)		# save new pc in the pcb
/*
 * Check to see if the instruction to be emulated is a floating-point
 * instruction.
 */
	srl	a3, a0, OPCODE_SHIFT
	beq	a3, OPCODE_C1, 4f		# this should never fail
	nop					# (delay slot)
/*
 * Send a floating point exception signal to the current process.
 */
3:
	GET_CPU_PCPU(a0)
	lw	a0, PC_CURTHREAD(a0)		# get current thread
	cfc1	a2, FPC_CSR			# code = FP exceptions
	ctc1	zero, FPC_CSR			# Clear exceptions
	la	t3, _C_LABEL(trapsignal)
	jalr	t3
	li	a1, SIGFPE			# (delay slot) signal number
	b	FPReturn
	nop

/*
 * Finally, we can call MipsEmulateFP() where a0 is the instruction to emulate.
 */
4:
	la	t3, _C_LABEL(MipsEmulateFP)
	jalr	t3
	nop					# (delay slot)

/*
 * Turn off the floating point coprocessor and return.
 */
FPReturn:
	mfc0	t0, COP_0_STATUS_REG
	lw	ra, STAND_RA_OFFSET(sp)
	and	t0, t0, ~SR_COP_1_BIT		# disable coprocessor 1 again
	mtc0	t0, COP_0_STATUS_REG
	ITLBNOPFIX
	j	ra
	addu	sp, sp, STAND_FRAME_SIZE	# (delay slot) pop frame
END(MipsFPTrap)
1207
1208
#if 0
/*
 * Atomic ipending update: ipending |= a0, retried via ll/sc until the
 * store-conditional succeeds.  (Currently compiled out.)
 */
LEAF(set_sint)
	la	v1, ipending
1:
	ll	v0, 0(v1)			# load-linked current ipending
	or	v0, a0				# merge in the new bits
	sc	v0, 0(v1)			# store-conditional; v0=0 on failure
	beqz	v0, 1b				# lost the reservation -- retry
	j	ra
	nop
	/*
	 * NOTE(review): if .set noreorder is in effect here, the `j ra`
	 * above sits in the beqz delay slot, which is architecturally
	 * undefined -- a nop is likely missing.  Dead code (#if 0), but
	 * confirm before re-enabling.
	 */
END(set_sint)
#endif
1224
/*
 * Interrupt counters for vmstat.
 * intrnames/intrcnt must stay parallel: the Nth name labels the Nth
 * counter word.  e* labels mark the ends for sizing by consumers.
 */
	.data
	.globl	intrcnt
	.globl	eintrcnt
	.globl	intrnames
	.globl	eintrnames
intrnames:
	.asciiz	"clock"
	.asciiz	"rtc"
	.asciiz	"sio"
	.asciiz	"pe"
	.asciiz	"pic-nic"
eintrnames:
	.align	2				# word-align the counters
intrcnt:
	.word	0,0,0,0,0			# one word per name above
eintrcnt:
1244
1245
/*
 * Vector to real handler in KSEG1.
 * The cache-error vector must not trust the caches, so convert the
 * handler's address to its uncached (KSEG1) alias before jumping:
 * strip the segment bits with MIPS_PHYS_MASK, then OR in the
 * uncached base address.
 */
	.text
VECTOR(MipsCache, unknown)
	la	k0, _C_LABEL(MipsCacheException)
	li	k1, MIPS_PHYS_MASK
	and	k0, k1				# k0 = physical address of handler
	li	k1, MIPS_UNCACHED_MEMORY_ADDR
	or	k0, k1				# k0 = KSEG1 (uncached) alias
	j	k0
	nop					# (delay slot)
VECTOR_END(MipsCache)
1259
1260 .set at
1261
1262
/*
 * Panic on cache errors. A lot more could be done to recover
 * from some types of errors but it is tricky.
 * Trick: point ErrorEPC at panic() and eret, so the "return" from the
 * error exception lands in panic() with a0 = message, a1 = error PC,
 * a2 = CacheErr already loaded.
 */
NESTED_NOPROFILE(MipsCacheException, KERN_EXC_FRAME_SIZE, ra)
	.set	noat
	.mask	0x80000000, -4
	la	k0, _C_LABEL(panic)		# return to panic
	la	a0, 9f				# panicstr (from MSG below)
	_MFC0	a1, COP_0_ERROR_PC		# 2nd arg: PC at the error
	mfc0	a2, COP_0_CACHE_ERR		# 3rd arg cache error

	_MTC0	k0, COP_0_ERROR_PC		# set return address

	mfc0	k0, COP_0_STATUS_REG		# restore status
	li	k1, SR_DIAG_DE			# ignore further errors
	or	k0, k1
	mtc0	k0, COP_0_STATUS_REG		# restore status
	HAZARD_DELAY

	eret					# "returns" into panic()

	MSG("cache error @ EPC 0x%x CachErr 0x%x");
	.set	at
END(MipsCacheException)