FreeBSD/Linux Kernel Cross Reference
sys/mips/mips/trap.c
1 /* $OpenBSD: trap.c,v 1.19 1998/09/30 12:40:41 pefo Exp $ */
2 /* tracked to 1.23 */
3 /*-
4 * Copyright (c) 1988 University of Utah.
5 * Copyright (c) 1992, 1993
6 * The Regents of the University of California. All rights reserved.
7 *
8 * This code is derived from software contributed to Berkeley by
9 * the Systems Programming Group of the University of Utah Computer
10 * Science Department and Ralph Campbell.
11 *
12 * Redistribution and use in source and binary forms, with or without
13 * modification, are permitted provided that the following conditions
14 * are met:
15 * 1. Redistributions of source code must retain the above copyright
16 * notice, this list of conditions and the following disclaimer.
17 * 2. Redistributions in binary form must reproduce the above copyright
18 * notice, this list of conditions and the following disclaimer in the
19 * documentation and/or other materials provided with the distribution.
20 * 4. Neither the name of the University nor the names of its contributors
21 * may be used to endorse or promote products derived from this software
22 * without specific prior written permission.
23 *
24 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
25 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
26 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
27 * ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
28 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
29 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
30 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
31 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
32 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
33 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
34 * SUCH DAMAGE.
35 *
36 * from: Utah Hdr: trap.c 1.32 91/04/06
37 *
38 * from: @(#)trap.c 8.5 (Berkeley) 1/11/94
39 * JNPR: trap.c,v 1.13.2.2 2007/08/29 10:03:49 girish
40 */
41 #include <sys/cdefs.h>
42 __FBSDID("$FreeBSD: releng/11.2/sys/mips/mips/trap.c 331722 2018-03-29 02:50:57Z eadler $");
43
44 #include "opt_compat.h"
45 #include "opt_ddb.h"
46 #include "opt_ktrace.h"
47
48 #include <sys/param.h>
49 #include <sys/systm.h>
50 #include <sys/sysent.h>
51 #include <sys/proc.h>
52 #include <sys/kernel.h>
53 #include <sys/signalvar.h>
54 #include <sys/syscall.h>
55 #include <sys/lock.h>
56 #include <vm/vm.h>
57 #include <vm/vm_extern.h>
58 #include <vm/vm_kern.h>
59 #include <vm/vm_page.h>
60 #include <vm/vm_map.h>
61 #include <vm/vm_param.h>
62 #include <sys/vmmeter.h>
63 #include <sys/ptrace.h>
64 #include <sys/user.h>
65 #include <sys/buf.h>
66 #include <sys/vnode.h>
67 #include <sys/pioctl.h>
68 #include <sys/sysctl.h>
69 #include <sys/syslog.h>
70 #include <sys/bus.h>
71 #ifdef KTRACE
72 #include <sys/ktrace.h>
73 #endif
74 #include <net/netisr.h>
75
76 #include <machine/trap.h>
77 #include <machine/cpu.h>
78 #include <machine/pte.h>
79 #include <machine/pmap.h>
80 #include <machine/md_var.h>
81 #include <machine/mips_opcode.h>
82 #include <machine/frame.h>
83 #include <machine/regnum.h>
84 #include <machine/tls.h>
85
86 #ifdef DDB
87 #include <machine/db_machdep.h>
88 #include <ddb/db_sym.h>
89 #include <ddb/ddb.h>
90 #include <sys/kdb.h>
91 #endif
92
93 #ifdef KDTRACE_HOOKS
94 #include <sys/dtrace_bsd.h>
95 #endif
96
97 #ifdef TRAP_DEBUG
98 int trap_debug = 0;
99 SYSCTL_INT(_machdep, OID_AUTO, trap_debug, CTLFLAG_RW,
100 &trap_debug, 0, "Debug information on all traps");
101 #endif
102
/*
 * Inline-assembly wrappers around the MIPS byte and partial-word
 * load/store instructions (lbu/lb, lwl/lwr, ldl/ldr, sb, swl/swr,
 * sdl/sdr).  lwl/lwr and ldl/ldr each transfer the left/right part of
 * a word or doubleword, which lets software assemble a misaligned
 * access out of two aligned ones.  Presumably these are the primitives
 * used by emulate_unaligned_access() (defined elsewhere in this file)
 * -- confirm against the full source.
 */
103 #define lbu_macro(data, addr) \
104 __asm __volatile ("lbu %0, 0x0(%1)" \
105 : "=r" (data) /* outputs */ \
106 : "r" (addr)); /* inputs */
107
108 #define lb_macro(data, addr) \
109 __asm __volatile ("lb %0, 0x0(%1)" \
110 : "=r" (data) /* outputs */ \
111 : "r" (addr)); /* inputs */
112
113 #define lwl_macro(data, addr) \
114 __asm __volatile ("lwl %0, 0x0(%1)" \
115 : "=r" (data) /* outputs */ \
116 : "r" (addr)); /* inputs */
117
118 #define lwr_macro(data, addr) \
119 __asm __volatile ("lwr %0, 0x0(%1)" \
120 : "=r" (data) /* outputs */ \
121 : "r" (addr)); /* inputs */
122
123 #define ldl_macro(data, addr) \
124 __asm __volatile ("ldl %0, 0x0(%1)" \
125 : "=r" (data) /* outputs */ \
126 : "r" (addr)); /* inputs */
127
128 #define ldr_macro(data, addr) \
129 __asm __volatile ("ldr %0, 0x0(%1)" \
130 : "=r" (data) /* outputs */ \
131 : "r" (addr)); /* inputs */
132
133 #define sb_macro(data, addr) \
134 __asm __volatile ("sb %0, 0x0(%1)" \
135 : /* outputs */ \
136 : "r" (data), "r" (addr)); /* inputs */
137
138 #define swl_macro(data, addr) \
139 __asm __volatile ("swl %0, 0x0(%1)" \
140 : /* outputs */ \
141 : "r" (data), "r" (addr)); /* inputs */
142
143 #define swr_macro(data, addr) \
144 __asm __volatile ("swr %0, 0x0(%1)" \
145 : /* outputs */ \
146 : "r" (data), "r" (addr)); /* inputs */
147
148 #define sdl_macro(data, addr) \
149 __asm __volatile ("sdl %0, 0x0(%1)" \
150 : /* outputs */ \
151 : "r" (data), "r" (addr)); /* inputs */
152
153 #define sdr_macro(data, addr) \
154 __asm __volatile ("sdr %0, 0x0(%1)" \
155 : /* outputs */ \
156 : "r" (data), "r" (addr)); /* inputs */
157
158 static void log_illegal_instruction(const char *, struct trapframe *);
159 static void log_bad_page_fault(char *, struct trapframe *, int);
160 static void log_frame_dump(struct trapframe *frame);
161 static void get_mapping_info(vm_offset_t, pd_entry_t **, pt_entry_t **);
162
163 int (*dtrace_invop_jump_addr)(struct trapframe *);
164
165 #ifdef TRAP_DEBUG
166 static void trap_frame_dump(struct trapframe *frame);
167 #endif
168
/*
 * Exception dispatch table: 32 kernel-mode handlers followed by 32
 * user-mode handlers, one per CP0 Cause ExcCode value.  The user half
 * lives at offset 32, matching the T_USER offset used by trap() below;
 * the table is presumably indexed from the low-level exception vector
 * (not visible in this file) -- confirm in locore.
 */
169 void (*machExceptionTable[]) (void)= {
170 /*
171 * The kernel exception handlers.
172 */
173 MipsKernIntr, /* external interrupt */
174 MipsKernGenException, /* TLB modification */
175 MipsTLBInvalidException,/* TLB miss (load or instr. fetch) */
176 MipsTLBInvalidException,/* TLB miss (store) */
177 MipsKernGenException, /* address error (load or I-fetch) */
178 MipsKernGenException, /* address error (store) */
179 MipsKernGenException, /* bus error (I-fetch) */
180 MipsKernGenException, /* bus error (load or store) */
181 MipsKernGenException, /* system call */
182 MipsKernGenException, /* breakpoint */
183 MipsKernGenException, /* reserved instruction */
184 MipsKernGenException, /* coprocessor unusable */
185 MipsKernGenException, /* arithmetic overflow */
186 MipsKernGenException, /* trap exception */
187 MipsKernGenException, /* virtual coherence exception inst */
188 MipsKernGenException, /* floating point exception */
189 MipsKernGenException, /* reserved */
190 MipsKernGenException, /* reserved */
191 MipsKernGenException, /* reserved */
192 MipsKernGenException, /* reserved */
193 MipsKernGenException, /* reserved */
194 MipsKernGenException, /* reserved */
195 MipsKernGenException, /* reserved */
196 MipsKernGenException, /* watch exception */
197 MipsKernGenException, /* reserved */
198 MipsKernGenException, /* reserved */
199 MipsKernGenException, /* reserved */
200 MipsKernGenException, /* reserved */
201 MipsKernGenException, /* reserved */
202 MipsKernGenException, /* reserved */
203 MipsKernGenException, /* reserved */
204 MipsKernGenException, /* virtual coherence exception data */
205 /*
206 * The user exception handlers.
207 */
208 MipsUserIntr, /* 0 */
209 MipsUserGenException, /* 1 */
210 MipsTLBInvalidException,/* 2 */
211 MipsTLBInvalidException,/* 3 */
212 MipsUserGenException, /* 4 */
213 MipsUserGenException, /* 5 */
214 MipsUserGenException, /* 6 */
215 MipsUserGenException, /* 7 */
216 MipsUserGenException, /* 8 */
217 MipsUserGenException, /* 9 */
218 MipsUserGenException, /* 10 */
219 MipsUserGenException, /* 11 */
220 MipsUserGenException, /* 12 */
221 MipsUserGenException, /* 13 */
222 MipsUserGenException, /* 14 */
223 MipsUserGenException, /* 15 */
224 MipsUserGenException, /* 16 */
225 MipsUserGenException, /* 17 */
226 MipsUserGenException, /* 18 */
227 MipsUserGenException, /* 19 */
228 MipsUserGenException, /* 20 */
229 MipsUserGenException, /* 21 */
230 MipsUserGenException, /* 22 */
231 MipsUserGenException, /* 23 */
232 MipsUserGenException, /* 24 */
233 MipsUserGenException, /* 25 */
234 MipsUserGenException, /* 26 */
235 MipsUserGenException, /* 27 */
236 MipsUserGenException, /* 28 */
237 MipsUserGenException, /* 29 */
238 MipsUserGenException, /* 30 */
239 MipsUserGenException, /* 31 */
240 };
241
/*
 * Human-readable name for each of the 32 exception codes.  Indexed with
 * (type & ~T_USER) by the diagnostic printfs in trap() and by trapDump().
 */
242 char *trap_type[] = {
243 "external interrupt",
244 "TLB modification",
245 "TLB miss (load or instr. fetch)",
246 "TLB miss (store)",
247 "address error (load or I-fetch)",
248 "address error (store)",
249 "bus error (I-fetch)",
250 "bus error (load or store)",
251 "system call",
252 "breakpoint",
253 "reserved instruction",
254 "coprocessor unusable",
255 "arithmetic overflow",
256 "trap",
257 "virtual coherency instruction",
258 "floating point",
259 "reserved 16",
260 "reserved 17",
261 "reserved 18",
262 "reserved 19",
263 "reserved 20",
264 "reserved 21",
265 "reserved 22",
266 "watch",
267 "reserved 24",
268 "reserved 25",
269 "reserved 26",
270 "reserved 27",
271 "reserved 28",
272 "reserved 29",
273 "reserved 30",
274 "virtual coherency data",
275 };
276
277 #if !defined(SMP) && (defined(DDB) || defined(DEBUG))
278 struct trapdebug trapdebug[TRAPSIZE], *trp = trapdebug;
279 #endif
280
281 #if defined(DDB) || defined(DEBUG)
282 void stacktrace(struct trapframe *);
283 void logstacktrace(struct trapframe *);
284 #endif
285
286 #define KERNLAND(x) ((vm_offset_t)(x) >= VM_MIN_KERNEL_ADDRESS && (vm_offset_t)(x) < VM_MAX_KERNEL_ADDRESS)
287 #define DELAYBRANCH(x) ((int)(x) < 0)
288
289 /*
290 * MIPS load/store access type
291 */
/*
 * Access-type codes returned by the unaligned-access emulation.
 * Values deliberately start at 1: callers in trap() treat a return
 * of 0 from emulate_unaligned_access() as "not emulated".
 */
292 enum {
293 MIPS_LHU_ACCESS = 1,
294 MIPS_LH_ACCESS,
295 MIPS_LWU_ACCESS,
296 MIPS_LW_ACCESS,
297 MIPS_LD_ACCESS,
298 MIPS_SH_ACCESS,
299 MIPS_SW_ACCESS,
300 MIPS_SD_ACCESS
301 };
302
/*
 * Printable names for the MIPS_*_ACCESS codes above.  Order matches the
 * enum, which starts at 1, so this is presumably indexed as
 * access_name[code - 1] -- confirm against the logging call sites.
 */
303 char *access_name[] = {
304 "Load Halfword Unsigned",
305 "Load Halfword",
306 "Load Word Unsigned",
307 "Load Word",
308 "Load Doubleword",
309 "Store Halfword",
310 "Store Word",
311 "Store Doubleword"
312 };
313
314 #ifdef CPU_CNMIPS
315 #include <machine/octeon_cop2.h>
316 #endif
317
318 static int allow_unaligned_acc = 1;
319
320 SYSCTL_INT(_vm, OID_AUTO, allow_unaligned_acc, CTLFLAG_RW,
321 &allow_unaligned_acc, 0, "Allow unaligned accesses");
322
323 /*
324 * FP emulation is assumed to work on O32, but the code is outdated and crufty
325 * enough that it's a more sensible default to have it disabled when using
326 * other ABIs. At the very least, it needs a lot of help in using
327 * type-semantic ABI-oblivious macros for everything it does.
328 */
329 #if defined(__mips_o32)
330 static int emulate_fp = 1;
331 #else
332 static int emulate_fp = 0;
333 #endif
334 SYSCTL_INT(_machdep, OID_AUTO, emulate_fp, CTLFLAG_RW,
335 &emulate_fp, 0, "Emulate unimplemented FPU instructions");
336
337 static int emulate_unaligned_access(struct trapframe *frame, int mode);
338
339 extern void fswintrberr(void); /* XXX */
340
/*
 * Fetch the system call number and arguments for the current syscall
 * into td->td_sa, advancing the saved PC past the syscall instruction
 * (emulating the branch when the trap occurred in a branch-delay slot).
 * Handles the indirect syscall()/__syscall() entry points, register
 * argument conventions for the o32/n32/n64 ABIs, and copies in any
 * arguments that did not fit in registers from the user stack.
 * Returns 0 on success or a copyin() errno; on error, v0/a3 in the
 * trapframe are also set to report the failure to userland.
 */
341 int
342 cpu_fetch_syscall_args(struct thread *td)
343 {
344 struct trapframe *locr0;
345 struct sysentvec *se;
346 struct syscall_args *sa;
347 int error, nsaved;
348
349 locr0 = td->td_frame;
350 sa = &td->td_sa;
351
352 bzero(sa->args, sizeof(sa->args));
353
354 /* compute next PC after syscall instruction */
355 td->td_pcb->pcb_tpc = sa->trapframe->pc; /* Remember if restart */
356 if (DELAYBRANCH(sa->trapframe->cause)) /* Check BD bit */
357 locr0->pc = MipsEmulateBranch(locr0, sa->trapframe->pc, 0, 0);
358 else
359 locr0->pc += sizeof(int);
360 sa->code = locr0->v0;
361
362 switch (sa->code) {
363 case SYS___syscall:
364 case SYS_syscall:
365 /*
366 * This is an indirect syscall, in which the code is the first argument.
367 */
368 #if (!defined(__mips_n32) && !defined(__mips_n64)) || defined(COMPAT_FREEBSD32)
369 if (sa->code == SYS___syscall && SV_PROC_FLAG(td->td_proc, SV_ILP32)) {
370 /*
371 * Like syscall, but code is a quad, so as to maintain alignment
372 * for the rest of the arguments.
373 */
374 if (_QUAD_LOWWORD == 0)
375 sa->code = locr0->a0;
376 else
377 sa->code = locr0->a1;
378 sa->args[0] = locr0->a2;
379 sa->args[1] = locr0->a3;
380 nsaved = 2;
381 break;
382 }
383 #endif
384 /*
385 * This is either not a quad syscall, or is a quad syscall with a
386 * new ABI in which quads fit in a single register.
387 */
388 sa->code = locr0->a0;
389 sa->args[0] = locr0->a1;
390 sa->args[1] = locr0->a2;
391 sa->args[2] = locr0->a3;
392 nsaved = 3;
393 #if defined(__mips_n32) || defined(__mips_n64)
394 #ifdef COMPAT_FREEBSD32
395 if (!SV_PROC_FLAG(td->td_proc, SV_ILP32)) {
396 #endif
397 /*
398 * Non-o32 ABIs support more arguments in registers.
399 */
400 sa->args[3] = locr0->a4;
401 sa->args[4] = locr0->a5;
402 sa->args[5] = locr0->a6;
403 sa->args[6] = locr0->a7;
404 nsaved += 4;
405 #ifdef COMPAT_FREEBSD32
406 }
407 #endif
408 #endif
409 break;
410 default:
411 /*
412 * A direct syscall, arguments are just parameters to the syscall.
413 */
414 sa->args[0] = locr0->a0;
415 sa->args[1] = locr0->a1;
416 sa->args[2] = locr0->a2;
417 sa->args[3] = locr0->a3;
418 nsaved = 4;
419 #if defined (__mips_n32) || defined(__mips_n64)
420 #ifdef COMPAT_FREEBSD32
421 if (!SV_PROC_FLAG(td->td_proc, SV_ILP32)) {
422 #endif
423 /*
424 * Non-o32 ABIs support more arguments in registers.
425 */
426 sa->args[4] = locr0->a4;
427 sa->args[5] = locr0->a5;
428 sa->args[6] = locr0->a6;
429 sa->args[7] = locr0->a7;
430 nsaved += 4;
431 #ifdef COMPAT_FREEBSD32
432 }
433 #endif
434 #endif
435 break;
436 }
437
438 #ifdef TRAP_DEBUG
439 if (trap_debug)
440 printf("SYSCALL #%d pid:%u\n", sa->code, td->td_proc->p_pid);
441 #endif
442
443 se = td->td_proc->p_sysent;
444 /*
445 * XXX
446 * Shouldn't this go before switching on the code?
447 */
448 if (se->sv_mask)
449 sa->code &= se->sv_mask;
450
451 if (sa->code >= se->sv_size)
452 sa->callp = &se->sv_table[0];
453 else
454 sa->callp = &se->sv_table[sa->code];
455
456 sa->narg = sa->callp->sy_narg;
457
458 if (sa->narg > nsaved) {
459 #if defined(__mips_n32) || defined(__mips_n64)
460 /*
461 * XXX
462 * Is this right for new ABIs? I think the 4 there
463 * should be 8, size there are 8 registers to skip,
464 * not 4, but I'm not certain.
465 */
466 #ifdef COMPAT_FREEBSD32
467 if (!SV_PROC_FLAG(td->td_proc, SV_ILP32))
468 #endif
469 printf("SYSCALL #%u pid:%u, narg (%u) > nsaved (%u).\n",
470 sa->code, td->td_proc->p_pid, sa->narg, nsaved);
471 #endif
472 #if (defined(__mips_n32) || defined(__mips_n64)) && defined(COMPAT_FREEBSD32)
473 if (SV_PROC_FLAG(td->td_proc, SV_ILP32)) {
474 unsigned i;
475 int32_t arg;
476
477 error = 0; /* XXX GCC is awful. */
478 for (i = nsaved; i < sa->narg; i++) {
479 error = copyin((caddr_t)(intptr_t)(locr0->sp +
480 (4 + (i - nsaved)) * sizeof(int32_t)),
481 (caddr_t)&arg, sizeof arg);
482 if (error != 0)
483 break;
484 sa->args[i] = arg;
485 }
486 } else
487 #endif
488 error = copyin((caddr_t)(intptr_t)(locr0->sp +
489 4 * sizeof(register_t)), (caddr_t)&sa->args[nsaved],
490 (u_int)(sa->narg - nsaved) * sizeof(register_t));
491 if (error != 0) {
492 locr0->v0 = error;
493 locr0->a3 = 1;
494 }
495 } else
496 error = 0;
497
498 if (error == 0) {
499 td->td_retval[0] = 0;
500 td->td_retval[1] = locr0->v1;
501 }
502
503 return (error);
504 }
505
506 #undef __FBSDID
507 #define __FBSDID(x)
508 #include "../../kern/subr_syscall.c"
509
510 /*
511 * Handle an exception.
512 * Called from MipsKernGenException() or MipsUserGenException()
513 * when a processor trap occurs.
514 * In the case of a kernel trap, we return the pc where to resume if
515 * p->p_addr->u_pcb.pcb_onfault is set, otherwise, return old pc.
 * For traps taken from user mode that are not resolved in the kernel,
 * a signal (chosen per-case into i/ucode/addr) is posted to the thread
 * and we return to userland via userret() at the "out" label.
516 */
517 register_t
518 trap(struct trapframe *trapframe)
519 {
520 int type, usermode;
521 int i = 0;
522 unsigned ucode = 0;
523 struct thread *td = curthread;
524 struct proc *p = curproc;
525 vm_prot_t ftype;
526 pmap_t pmap;
527 int access_type;
528 ksiginfo_t ksi;
529 char *msg = NULL;
530 intptr_t addr = 0;
531 register_t pc;
532 int cop;
533 register_t *frame_regs;
534
535 trapdebug_enter(trapframe, 0);
536
537 type = (trapframe->cause & MIPS_CR_EXC_CODE) >> MIPS_CR_EXC_CODE_SHIFT;
538 if (TRAPF_USERMODE(trapframe)) {
539 type |= T_USER;
540 usermode = 1;
541 } else {
542 usermode = 0;
543 }
544
545 /*
546 * Enable hardware interrupts if they were on before the trap. If it
547 * was off disable all so we don't accidently enable it when doing a
548 * return to userland.
549 */
550 if (trapframe->sr & MIPS_SR_INT_IE) {
551 set_intr_mask(trapframe->sr & MIPS_SR_INT_MASK);
552 intr_enable();
553 } else {
554 intr_disable();
555 }
556
557 #ifdef TRAP_DEBUG
558 if (trap_debug) {
559 static vm_offset_t last_badvaddr = 0;
560 static vm_offset_t this_badvaddr = 0;
561 static int count = 0;
562 u_int32_t pid;
563
564 printf("trap type %x (%s - ", type,
565 trap_type[type & (~T_USER)]);
566
567 if (type & T_USER)
568 printf("user mode)\n");
569 else
570 printf("kernel mode)\n");
571
572 #ifdef SMP
573 printf("cpuid = %d\n", PCPU_GET(cpuid));
574 #endif
575 pid = mips_rd_entryhi() & TLBHI_ASID_MASK;
576 printf("badaddr = %#jx, pc = %#jx, ra = %#jx, sp = %#jx, sr = %jx, pid = %d, ASID = %u\n",
577 (intmax_t)trapframe->badvaddr, (intmax_t)trapframe->pc, (intmax_t)trapframe->ra,
578 (intmax_t)trapframe->sp, (intmax_t)trapframe->sr,
579 (curproc ? curproc->p_pid : -1), pid);
580
581 switch (type & ~T_USER) {
582 case T_TLB_MOD:
583 case T_TLB_LD_MISS:
584 case T_TLB_ST_MISS:
585 case T_ADDR_ERR_LD:
586 case T_ADDR_ERR_ST:
587 this_badvaddr = trapframe->badvaddr;
588 break;
589 case T_SYSCALL:
590 this_badvaddr = trapframe->ra;
591 break;
592 default:
593 this_badvaddr = trapframe->pc;
594 break;
595 }
 /* Panic if the same (non-syscall) fault address repeats three times. */
596 if ((last_badvaddr == this_badvaddr) &&
597 ((type & ~T_USER) != T_SYSCALL)) {
598 if (++count == 3) {
599 trap_frame_dump(trapframe);
600 panic("too many faults at %p\n", (void *)last_badvaddr);
601 }
602 } else {
603 last_badvaddr = this_badvaddr;
604 count = 0;
605 }
606 }
607 #endif
608
609 #ifdef KDTRACE_HOOKS
610 /*
611 * A trap can occur while DTrace executes a probe. Before
612 * executing the probe, DTrace blocks re-scheduling and sets
613 * a flag in its per-cpu flags to indicate that it doesn't
614 * want to fault. On returning from the probe, the no-fault
615 * flag is cleared and finally re-scheduling is enabled.
616 *
617 * If the DTrace kernel module has registered a trap handler,
618 * call it and if it returns non-zero, assume that it has
619 * handled the trap and modified the trap frame so that this
620 * function can return normally.
621 */
622 /*
623 * XXXDTRACE: add pid probe handler here (if ever)
624 */
625 if (!usermode) {
626 if (dtrace_trap_func != NULL &&
627 (*dtrace_trap_func)(trapframe, type) != 0)
628 return (trapframe->pc);
629 }
630 #endif
631
 /* Cases offset by T_USER are the same exceptions taken from user mode. */
632 switch (type) {
633 case T_MCHECK:
634 #ifdef DDB
635 kdb_trap(type, 0, trapframe);
636 #endif
637 panic("MCHECK\n");
638 break;
639 case T_TLB_MOD:
640 /* check for kernel address */
641 if (KERNLAND(trapframe->badvaddr)) {
642 if (pmap_emulate_modified(kernel_pmap,
643 trapframe->badvaddr) != 0) {
644 ftype = VM_PROT_WRITE;
645 goto kernel_fault;
646 }
647 return (trapframe->pc);
648 }
649 /* FALLTHROUGH */
650
651 case T_TLB_MOD + T_USER:
652 pmap = &p->p_vmspace->vm_pmap;
653 if (pmap_emulate_modified(pmap, trapframe->badvaddr) != 0) {
654 ftype = VM_PROT_WRITE;
655 goto dofault;
656 }
657 if (!usermode)
658 return (trapframe->pc);
659 goto out;
660
661 case T_TLB_LD_MISS:
662 case T_TLB_ST_MISS:
663 ftype = (type == T_TLB_ST_MISS) ? VM_PROT_WRITE : VM_PROT_READ;
664 /* check for kernel address */
665 if (KERNLAND(trapframe->badvaddr)) {
666 vm_offset_t va;
667 int rv;
668
669 kernel_fault:
670 va = trunc_page((vm_offset_t)trapframe->badvaddr);
671 rv = vm_fault(kernel_map, va, ftype, VM_FAULT_NORMAL);
672 if (rv == KERN_SUCCESS)
673 return (trapframe->pc);
674 if (td->td_pcb->pcb_onfault != NULL) {
675 pc = (register_t)(intptr_t)td->td_pcb->pcb_onfault;
676 td->td_pcb->pcb_onfault = NULL;
677 return (pc);
678 }
679 goto err;
680 }
681
682 /*
683 * It is an error for the kernel to access user space except
684 * through the copyin/copyout routines.
685 */
686 if (td->td_pcb->pcb_onfault == NULL)
687 goto err;
688
689 /* check for fuswintr() or suswintr() getting a page fault */
690 /* XXX There must be a nicer way to do this. */
691 if (td->td_pcb->pcb_onfault == fswintrberr) {
692 pc = (register_t)(intptr_t)td->td_pcb->pcb_onfault;
693 td->td_pcb->pcb_onfault = NULL;
694 return (pc);
695 }
696
697 goto dofault;
698
699 case T_TLB_LD_MISS + T_USER:
700 ftype = VM_PROT_READ;
701 goto dofault;
702
703 case T_TLB_ST_MISS + T_USER:
704 ftype = VM_PROT_WRITE;
705 dofault:
706 {
707 vm_offset_t va;
708 struct vmspace *vm;
709 vm_map_t map;
710 int rv = 0;
711
712 vm = p->p_vmspace;
713 map = &vm->vm_map;
714 va = trunc_page((vm_offset_t)trapframe->badvaddr);
715 if (KERNLAND(trapframe->badvaddr)) {
716 /*
717 * Don't allow user-mode faults in kernel
718 * address space.
719 */
720 goto nogo;
721 }
722
723 rv = vm_fault(map, va, ftype, VM_FAULT_NORMAL);
724 /*
725 * XXXDTRACE: add dtrace_doubletrap_func here?
726 */
727 #ifdef VMFAULT_TRACE
728 printf("vm_fault(%p (pmap %p), %p (%p), %x, %d) -> %x at pc %p\n",
729 map, &vm->vm_pmap, (void *)va, (void *)(intptr_t)trapframe->badvaddr,
730 ftype, VM_FAULT_NORMAL, rv, (void *)(intptr_t)trapframe->pc);
731 #endif
732
733 if (rv == KERN_SUCCESS) {
734 if (!usermode) {
735 return (trapframe->pc);
736 }
737 goto out;
738 }
739 nogo:
740 if (!usermode) {
741 if (td->td_pcb->pcb_onfault != NULL) {
742 pc = (register_t)(intptr_t)td->td_pcb->pcb_onfault;
743 td->td_pcb->pcb_onfault = NULL;
744 return (pc);
745 }
746 goto err;
747 }
748 ucode = ftype;
749 i = ((rv == KERN_PROTECTION_FAILURE) ? SIGBUS : SIGSEGV);
750 addr = trapframe->pc;
751
752 msg = "BAD_PAGE_FAULT";
753 log_bad_page_fault(msg, trapframe, type);
754
755 break;
756 }
757
758 case T_ADDR_ERR_LD + T_USER: /* misaligned or kseg access */
759 case T_ADDR_ERR_ST + T_USER: /* misaligned or kseg access */
760 if (trapframe->badvaddr < 0 ||
761 trapframe->badvaddr >= VM_MAXUSER_ADDRESS) {
762 msg = "ADDRESS_SPACE_ERR";
763 } else if (allow_unaligned_acc) {
764 int mode;
765
766 if (type == (T_ADDR_ERR_LD + T_USER))
767 mode = VM_PROT_READ;
768 else
769 mode = VM_PROT_WRITE;
770
 /* Nonzero return means the access was successfully emulated. */
771 access_type = emulate_unaligned_access(trapframe, mode);
772 if (access_type != 0)
773 goto out;
774 msg = "ALIGNMENT_FIX_ERR";
775 } else {
776 msg = "ADDRESS_ERR";
777 }
778
779 /* FALL THROUGH */
780
781 case T_BUS_ERR_IFETCH + T_USER: /* BERR asserted to cpu */
782 case T_BUS_ERR_LD_ST + T_USER: /* BERR asserted to cpu */
783 ucode = 0; /* XXX should be VM_PROT_something */
784 i = SIGBUS;
785 addr = trapframe->pc;
786 if (!msg)
787 msg = "BUS_ERR";
788 log_bad_page_fault(msg, trapframe, type);
789 break;
790
791 case T_SYSCALL + T_USER:
792 {
793 int error;
794
795 td->td_sa.trapframe = trapframe;
796 error = syscallenter(td);
797
798 #if !defined(SMP) && (defined(DDB) || defined(DEBUG))
799 if (trp == trapdebug)
800 trapdebug[TRAPSIZE - 1].code = td->td_sa.code;
801 else
802 trp[-1].code = td->td_sa.code;
803 #endif
804 trapdebug_enter(td->td_frame, -td->td_sa.code);
805
806 /*
807 * The sync'ing of I & D caches for SYS_ptrace() is
808 * done by procfs_domem() through procfs_rwmem()
809 * instead of being done here under a special check
810 * for SYS_ptrace().
811 */
812 syscallret(td, error);
813 return (trapframe->pc);
814 }
815
816 #if defined(KDTRACE_HOOKS) || defined(DDB)
817 case T_BREAK:
818 #ifdef KDTRACE_HOOKS
819 if (!usermode && dtrace_invop_jump_addr != 0) {
820 dtrace_invop_jump_addr(trapframe);
821 return (trapframe->pc);
822 }
823 #endif
824 #ifdef DDB
825 kdb_trap(type, 0, trapframe);
826 return (trapframe->pc);
827 #endif
828 #endif
829
830 case T_BREAK + T_USER:
831 {
832 intptr_t va;
833 uint32_t instr;
834
835 /* compute address of break instruction */
836 va = trapframe->pc;
837 if (DELAYBRANCH(trapframe->cause))
838 va += sizeof(int);
839
840 /* read break instruction */
841 instr = fuword32((caddr_t)va);
842 #if 0
843 printf("trap: %s (%d) breakpoint %x at %x: (adr %x ins %x)\n",
844 p->p_comm, p->p_pid, instr, trapframe->pc,
845 p->p_md.md_ss_addr, p->p_md.md_ss_instr); /* XXX */
846 #endif
847 if (td->td_md.md_ss_addr != va ||
848 instr != MIPS_BREAK_SSTEP) {
849 i = SIGTRAP;
850 addr = trapframe->pc;
851 break;
852 }
853 /*
854 * The restoration of the original instruction and
855 * the clearing of the breakpoint will be done later
856 * by the call to ptrace_clear_single_step() in
857 * issignal() when SIGTRAP is processed.
858 */
859 addr = trapframe->pc;
860 i = SIGTRAP;
861 break;
862 }
863
864 case T_IWATCH + T_USER:
865 case T_DWATCH + T_USER:
866 {
867 intptr_t va;
868
869 /* compute address of trapped instruction */
870 va = trapframe->pc;
871 if (DELAYBRANCH(trapframe->cause))
872 va += sizeof(int);
873 printf("watch exception @ %p\n", (void *)va);
874 i = SIGTRAP;
875 addr = va;
876 break;
877 }
878
879 case T_TRAP + T_USER:
880 {
881 intptr_t va;
882 uint32_t instr;
883 struct trapframe *locr0 = td->td_frame;
884
885 /* compute address of trap instruction */
886 va = trapframe->pc;
887 if (DELAYBRANCH(trapframe->cause))
888 va += sizeof(int);
889 /* read break instruction */
890 instr = fuword32((caddr_t)va);
891
892 if (DELAYBRANCH(trapframe->cause)) { /* Check BD bit */
893 locr0->pc = MipsEmulateBranch(locr0, trapframe->pc, 0,
894 0);
895 } else {
896 locr0->pc += sizeof(int);
897 }
898 addr = va;
899 i = SIGEMT; /* Stuff it with something for now */
900 break;
901 }
902
903 case T_RES_INST + T_USER:
904 {
905 InstFmt inst;
906 inst = *(InstFmt *)(intptr_t)trapframe->pc;
907 switch (inst.RType.op) {
908 case OP_SPECIAL3:
909 switch (inst.RType.func) {
910 case OP_RDHWR:
911 /* Register 29 used for TLS */
912 if (inst.RType.rd == 29) {
913 frame_regs = &(trapframe->zero);
914 frame_regs[inst.RType.rt] = (register_t)(intptr_t)td->td_md.md_tls;
915 #if defined(__mips_n64) && defined(COMPAT_FREEBSD32)
916 if (SV_PROC_FLAG(td->td_proc, SV_ILP32))
917 frame_regs[inst.RType.rt] += TLS_TP_OFFSET + TLS_TCB_SIZE32;
918 else
919 #endif
920 frame_regs[inst.RType.rt] += TLS_TP_OFFSET + TLS_TCB_SIZE;
921 trapframe->pc += sizeof(int);
922 goto out;
923 }
924 break;
925 }
926 break;
927 }
928
929 log_illegal_instruction("RES_INST", trapframe);
930 i = SIGILL;
931 addr = trapframe->pc;
932 }
933 break;
934 case T_C2E:
935 case T_C2E + T_USER:
936 goto err;
937 break;
938 case T_COP_UNUSABLE:
939 #ifdef CPU_CNMIPS
940 cop = (trapframe->cause & MIPS_CR_COP_ERR) >> MIPS_CR_COP_ERR_SHIFT;
941 /* Handle only COP2 exception */
942 if (cop != 2)
943 goto err;
944
945 addr = trapframe->pc;
946 /* save userland cop2 context if it has been touched */
947 if ((td->td_md.md_flags & MDTD_COP2USED) &&
948 (td->td_md.md_cop2owner == COP2_OWNER_USERLAND)) {
949 if (td->td_md.md_ucop2)
950 octeon_cop2_save(td->td_md.md_ucop2);
951 else
952 panic("COP2 was used in user mode but md_ucop2 is NULL");
953 }
954
955 if (td->td_md.md_cop2 == NULL) {
956 td->td_md.md_cop2 = octeon_cop2_alloc_ctx();
957 if (td->td_md.md_cop2 == NULL)
958 panic("Failed to allocate COP2 context");
959 memset(td->td_md.md_cop2, 0, sizeof(*td->td_md.md_cop2));
960 }
961
962 octeon_cop2_restore(td->td_md.md_cop2);
963
964 /* Make userland re-request its context */
965 td->td_frame->sr &= ~MIPS_SR_COP_2_BIT;
966 td->td_md.md_flags |= MDTD_COP2USED;
967 td->td_md.md_cop2owner = COP2_OWNER_KERNEL;
968 /* Enable COP2, it will be disabled in cpu_switch */
969 mips_wr_status(mips_rd_status() | MIPS_SR_COP_2_BIT);
970 return (trapframe->pc);
971 #else
972 goto err;
973 break;
974 #endif
975
976 case T_COP_UNUSABLE + T_USER:
977 cop = (trapframe->cause & MIPS_CR_COP_ERR) >> MIPS_CR_COP_ERR_SHIFT;
978 if (cop == 1) {
979 #if !defined(CPU_HAVEFPU)
980 /* FP (COP1) instruction */
981 log_illegal_instruction("COP1_UNUSABLE", trapframe);
982 i = SIGILL;
983 break;
984 #else
985 addr = trapframe->pc;
986 MipsSwitchFPState(PCPU_GET(fpcurthread), td->td_frame);
987 PCPU_SET(fpcurthread, td);
988 td->td_frame->sr |= MIPS_SR_COP_1_BIT;
989 td->td_md.md_flags |= MDTD_FPUSED;
990 goto out;
991 #endif
992 }
993 #ifdef CPU_CNMIPS
994 else if (cop == 2) {
995 addr = trapframe->pc;
996 if ((td->td_md.md_flags & MDTD_COP2USED) &&
997 (td->td_md.md_cop2owner == COP2_OWNER_KERNEL)) {
998 if (td->td_md.md_cop2)
999 octeon_cop2_save(td->td_md.md_cop2);
1000 else
1001 panic("COP2 was used in kernel mode but md_cop2 is NULL");
1002 }
1003
1004 if (td->td_md.md_ucop2 == NULL) {
1005 td->td_md.md_ucop2 = octeon_cop2_alloc_ctx();
1006 if (td->td_md.md_ucop2 == NULL)
1007 panic("Failed to allocate userland COP2 context");
1008 memset(td->td_md.md_ucop2, 0, sizeof(*td->td_md.md_ucop2));
1009 }
1010
1011 octeon_cop2_restore(td->td_md.md_ucop2);
1012
1013 td->td_frame->sr |= MIPS_SR_COP_2_BIT;
1014 td->td_md.md_flags |= MDTD_COP2USED;
1015 td->td_md.md_cop2owner = COP2_OWNER_USERLAND;
1016 goto out;
1017 }
1018 #endif
1019 else {
1020 log_illegal_instruction("COPn_UNUSABLE", trapframe);
1021 i = SIGILL; /* only FPU instructions allowed */
1022 break;
1023 }
1024
1025 case T_FPE:
1026 #if !defined(SMP) && (defined(DDB) || defined(DEBUG))
1027 trapDump("fpintr");
1028 #else
1029 printf("FPU Trap: PC %#jx CR %x SR %x\n",
1030 (intmax_t)trapframe->pc, (unsigned)trapframe->cause, (unsigned)trapframe->sr);
1031 goto err;
1032 #endif
1033
1034 case T_FPE + T_USER:
1035 if (!emulate_fp) {
1036 i = SIGILL;
1037 addr = trapframe->pc;
1038 break;
1039 }
1040 MipsFPTrap(trapframe->sr, trapframe->cause, trapframe->pc);
1041 goto out;
1042
1043 case T_OVFLOW + T_USER:
1044 i = SIGFPE;
1045 addr = trapframe->pc;
1046 break;
1047
1048 case T_ADDR_ERR_LD: /* misaligned access */
1049 case T_ADDR_ERR_ST: /* misaligned access */
1050 #ifdef TRAP_DEBUG
1051 if (trap_debug) {
1052 printf("+++ ADDR_ERR: type = %d, badvaddr = %#jx\n", type,
1053 (intmax_t)trapframe->badvaddr);
1054 }
1055 #endif
1056 /* Only allow emulation on a user address */
1057 if (allow_unaligned_acc &&
1058 ((vm_offset_t)trapframe->badvaddr < VM_MAXUSER_ADDRESS)) {
1059 int mode;
1060
1061 if (type == T_ADDR_ERR_LD)
1062 mode = VM_PROT_READ;
1063 else
1064 mode = VM_PROT_WRITE;
1065
1066 access_type = emulate_unaligned_access(trapframe, mode);
1067 if (access_type != 0)
1068 return (trapframe->pc);
1069 }
1070 /* FALLTHROUGH */
1071
1072 case T_BUS_ERR_LD_ST: /* BERR asserted to cpu */
1073 if (td->td_pcb->pcb_onfault != NULL) {
1074 pc = (register_t)(intptr_t)td->td_pcb->pcb_onfault;
1075 td->td_pcb->pcb_onfault = NULL;
1076 return (pc);
1077 }
1078
1079 /* FALLTHROUGH */
1080
1081 default:
1082 err:
1083
1084 #if !defined(SMP) && defined(DEBUG)
1085 stacktrace(!usermode ? trapframe : td->td_frame);
1086 trapDump("trap");
1087 #endif
1088 #ifdef SMP
1089 printf("cpu:%d-", PCPU_GET(cpuid));
1090 #endif
1091 printf("Trap cause = %d (%s - ", type,
1092 trap_type[type & (~T_USER)]);
1093
1094 if (type & T_USER)
1095 printf("user mode)\n");
1096 else
1097 printf("kernel mode)\n");
1098
1099 #ifdef TRAP_DEBUG
1100 if (trap_debug)
 /* NOTE(review): "%#jxx" has a stray trailing 'x'; output shows a literal 'x' after sr. */
1101 printf("badvaddr = %#jx, pc = %#jx, ra = %#jx, sr = %#jxx\n",
1102 (intmax_t)trapframe->badvaddr, (intmax_t)trapframe->pc, (intmax_t)trapframe->ra,
1103 (intmax_t)trapframe->sr);
1104 #endif
1105
1106 #ifdef KDB
1107 if (debugger_on_panic || kdb_active) {
1108 kdb_trap(type, 0, trapframe);
1109 }
1110 #endif
1111 panic("trap");
1112 }
 /* A signal was selected above: deliver it with the faulting context. */
1113 td->td_frame->pc = trapframe->pc;
1114 td->td_frame->cause = trapframe->cause;
1115 td->td_frame->badvaddr = trapframe->badvaddr;
1116 ksiginfo_init_trap(&ksi);
1117 ksi.ksi_signo = i;
1118 ksi.ksi_code = ucode;
1119 ksi.ksi_addr = (void *)addr;
1120 ksi.ksi_trapno = type;
1121 trapsignal(td, &ksi);
1122 out:
1123
1124 /*
1125 * Note: we should only get here if returning to user mode.
1126 */
1127 userret(td, trapframe);
1128 return (trapframe->pc);
1129 }
1130
1131 #if !defined(SMP) && (defined(DDB) || defined(DEBUG))
1132 void
1133 trapDump(char *msg)
1134 {
1135 register_t s;
1136 int i;
1137
1138 s = intr_disable();
1139 printf("trapDump(%s)\n", msg);
1140 for (i = 0; i < TRAPSIZE; i++) {
1141 if (trp == trapdebug) {
1142 trp = &trapdebug[TRAPSIZE - 1];
1143 } else {
1144 trp--;
1145 }
1146
1147 if (trp->cause == 0)
1148 break;
1149
1150 printf("%s: ADR %jx PC %jx CR %jx SR %jx\n",
1151 trap_type[(trp->cause & MIPS_CR_EXC_CODE) >>
1152 MIPS_CR_EXC_CODE_SHIFT],
1153 (intmax_t)trp->vadr, (intmax_t)trp->pc,
1154 (intmax_t)trp->cause, (intmax_t)trp->status);
1155
1156 printf(" RA %jx SP %jx code %d\n", (intmax_t)trp->ra,
1157 (intmax_t)trp->sp, (int)trp->code);
1158 }
1159 intr_restore(s);
1160 }
1161 #endif
1162
1163
/*
 * Return the resulting PC as if the branch was executed.
 *
 * Decodes the branch/jump instruction at 'instPC' (or, when 'instptr'
 * is non-zero, the instruction located at 'instptr') and computes the
 * address execution would continue at once the branch resolves.  Used
 * when a fault is taken in a branch delay slot and the saved pc points
 * at the branch itself.  'fpcCSR' supplies the FPU condition bit for
 * coprocessor-1 branches.
 */
uintptr_t
MipsEmulateBranch(struct trapframe *framePtr, uintptr_t instPC, int fpcCSR,
    uintptr_t instptr)
{
	InstFmt inst;
	register_t *regsPtr = (register_t *) framePtr;
	uintptr_t retAddr = 0;
	int condition;

/*
 * Branch target = address of the delay slot (InstPtr + 4) plus the
 * sign-extended 16-bit immediate scaled to an instruction offset.
 */
#define	GetBranchDest(InstPtr, inst) \
	(InstPtr + 4 + ((short)inst.IType.imm << 2))


	/*
	 * Fetch the instruction word: through fuword32() for user
	 * addresses (below KSEG0), by direct dereference otherwise.
	 */
	if (instptr) {
		if (instptr < MIPS_KSEG0_START)
			inst.word = fuword32((void *)instptr);
		else
			inst = *(InstFmt *) instptr;
	} else {
		if ((vm_offset_t)instPC < MIPS_KSEG0_START)
			inst.word = fuword32((void *)instPC);
		else
			inst = *(InstFmt *) instPC;
	}

	/*
	 * In the not-taken cases below, instPC + 8 skips both the branch
	 * and its delay slot; instPC + 4 is the ordinary next instruction.
	 */
	switch ((int)inst.JType.op) {
	case OP_SPECIAL:
		switch ((int)inst.RType.func) {
		case OP_JR:
		case OP_JALR:
			/* Register jumps: target comes from rs. */
			retAddr = regsPtr[inst.RType.rs];
			break;

		default:
			retAddr = instPC + 4;
			break;
		}
		break;

	case OP_BCOND:
		/* REGIMM group: the rt field selects the operation. */
		switch ((int)inst.IType.rt) {
		case OP_BLTZ:
		case OP_BLTZL:
		case OP_BLTZAL:
		case OP_BLTZALL:
			if ((int)(regsPtr[inst.RType.rs]) < 0)
				retAddr = GetBranchDest(instPC, inst);
			else
				retAddr = instPC + 8;
			break;

		case OP_BGEZ:
		case OP_BGEZL:
		case OP_BGEZAL:
		case OP_BGEZALL:
			if ((int)(regsPtr[inst.RType.rs]) >= 0)
				retAddr = GetBranchDest(instPC, inst);
			else
				retAddr = instPC + 8;
			break;

		case OP_TGEI:
		case OP_TGEIU:
		case OP_TLTI:
		case OP_TLTIU:
		case OP_TEQI:
		case OP_TNEI:
			/* Conditional traps do not branch. */
			retAddr = instPC + 4;	/* Like syscall... */
			break;

		default:
			panic("MipsEmulateBranch: Bad branch cond");
		}
		break;

	case OP_J:
	case OP_JAL:
		/*
		 * Absolute jumps: 26-bit target scaled by 4, merged with
		 * the high bits of the delay slot's address.
		 */
		retAddr = (inst.JType.target << 2) |
		    ((unsigned)(instPC + 4) & 0xF0000000);
		break;

	case OP_BEQ:
	case OP_BEQL:
		if (regsPtr[inst.RType.rs] == regsPtr[inst.RType.rt])
			retAddr = GetBranchDest(instPC, inst);
		else
			retAddr = instPC + 8;
		break;

	case OP_BNE:
	case OP_BNEL:
		if (regsPtr[inst.RType.rs] != regsPtr[inst.RType.rt])
			retAddr = GetBranchDest(instPC, inst);
		else
			retAddr = instPC + 8;
		break;

	case OP_BLEZ:
	case OP_BLEZL:
		if ((int)(regsPtr[inst.RType.rs]) <= 0)
			retAddr = GetBranchDest(instPC, inst);
		else
			retAddr = instPC + 8;
		break;

	case OP_BGTZ:
	case OP_BGTZL:
		if ((int)(regsPtr[inst.RType.rs]) > 0)
			retAddr = GetBranchDest(instPC, inst);
		else
			retAddr = instPC + 8;
		break;

	case OP_COP1:
		/* FPU branches: taken/not-taken depends on the FP condition bit. */
		switch (inst.RType.rs) {
		case OP_BCx:
		case OP_BCy:
			if ((inst.RType.rt & COPz_BC_TF_MASK) == COPz_BC_TRUE)
				condition = fpcCSR & MIPS_FPU_COND_BIT;
			else
				condition = !(fpcCSR & MIPS_FPU_COND_BIT);
			if (condition)
				retAddr = GetBranchDest(instPC, inst);
			else
				retAddr = instPC + 8;
			break;

		default:
			retAddr = instPC + 4;
		}
		break;

	default:
		/* Not a branch: just advance past the instruction. */
		retAddr = instPC + 4;
	}
	return (retAddr);
}
1304
1305
1306 #if defined(DDB) || defined(DEBUG)
/*
 * Print a stack backtrace seeded from the trap frame's saved pc, sp
 * and ra, sending the output through printf.
 */
void
stacktrace(struct trapframe *regs)
{
	stacktrace_subr(regs->pc, regs->sp, regs->ra, printf);
}
1315 #endif
1316
1317 static void
1318 log_frame_dump(struct trapframe *frame)
1319 {
1320 log(LOG_ERR, "Trapframe Register Dump:\n");
1321 log(LOG_ERR, "\tzero: %#jx\tat: %#jx\tv0: %#jx\tv1: %#jx\n",
1322 (intmax_t)0, (intmax_t)frame->ast, (intmax_t)frame->v0, (intmax_t)frame->v1);
1323
1324 log(LOG_ERR, "\ta0: %#jx\ta1: %#jx\ta2: %#jx\ta3: %#jx\n",
1325 (intmax_t)frame->a0, (intmax_t)frame->a1, (intmax_t)frame->a2, (intmax_t)frame->a3);
1326
1327 #if defined(__mips_n32) || defined(__mips_n64)
1328 log(LOG_ERR, "\ta4: %#jx\ta5: %#jx\ta6: %#jx\ta6: %#jx\n",
1329 (intmax_t)frame->a4, (intmax_t)frame->a5, (intmax_t)frame->a6, (intmax_t)frame->a7);
1330
1331 log(LOG_ERR, "\tt0: %#jx\tt1: %#jx\tt2: %#jx\tt3: %#jx\n",
1332 (intmax_t)frame->t0, (intmax_t)frame->t1, (intmax_t)frame->t2, (intmax_t)frame->t3);
1333 #else
1334 log(LOG_ERR, "\tt0: %#jx\tt1: %#jx\tt2: %#jx\tt3: %#jx\n",
1335 (intmax_t)frame->t0, (intmax_t)frame->t1, (intmax_t)frame->t2, (intmax_t)frame->t3);
1336
1337 log(LOG_ERR, "\tt4: %#jx\tt5: %#jx\tt6: %#jx\tt7: %#jx\n",
1338 (intmax_t)frame->t4, (intmax_t)frame->t5, (intmax_t)frame->t6, (intmax_t)frame->t7);
1339 #endif
1340 log(LOG_ERR, "\tt8: %#jx\tt9: %#jx\ts0: %#jx\ts1: %#jx\n",
1341 (intmax_t)frame->t8, (intmax_t)frame->t9, (intmax_t)frame->s0, (intmax_t)frame->s1);
1342
1343 log(LOG_ERR, "\ts2: %#jx\ts3: %#jx\ts4: %#jx\ts5: %#jx\n",
1344 (intmax_t)frame->s2, (intmax_t)frame->s3, (intmax_t)frame->s4, (intmax_t)frame->s5);
1345
1346 log(LOG_ERR, "\ts6: %#jx\ts7: %#jx\tk0: %#jx\tk1: %#jx\n",
1347 (intmax_t)frame->s6, (intmax_t)frame->s7, (intmax_t)frame->k0, (intmax_t)frame->k1);
1348
1349 log(LOG_ERR, "\tgp: %#jx\tsp: %#jx\ts8: %#jx\tra: %#jx\n",
1350 (intmax_t)frame->gp, (intmax_t)frame->sp, (intmax_t)frame->s8, (intmax_t)frame->ra);
1351
1352 log(LOG_ERR, "\tsr: %#jx\tmullo: %#jx\tmulhi: %#jx\tbadvaddr: %#jx\n",
1353 (intmax_t)frame->sr, (intmax_t)frame->mullo, (intmax_t)frame->mulhi, (intmax_t)frame->badvaddr);
1354
1355 #ifdef IC_REG
1356 log(LOG_ERR, "\tcause: %#jx\tpc: %#jx\tic: %#jx\n",
1357 (intmax_t)frame->cause, (intmax_t)frame->pc, (intmax_t)frame->ic);
1358 #else
1359 log(LOG_ERR, "\tcause: %#jx\tpc: %#jx\n",
1360 (intmax_t)frame->cause, (intmax_t)frame->pc);
1361 #endif
1362 }
1363
1364 #ifdef TRAP_DEBUG
/*
 * Dump the complete trapframe register set to the console via printf.
 * Console counterpart of log_frame_dump(); kept in the same order and
 * format so the two outputs line up.
 */
static void
trap_frame_dump(struct trapframe *frame)
{
	printf("Trapframe Register Dump:\n");
	printf("\tzero: %#jx\tat: %#jx\tv0: %#jx\tv1: %#jx\n",
	    (intmax_t)0, (intmax_t)frame->ast, (intmax_t)frame->v0, (intmax_t)frame->v1);

	printf("\ta0: %#jx\ta1: %#jx\ta2: %#jx\ta3: %#jx\n",
	    (intmax_t)frame->a0, (intmax_t)frame->a1, (intmax_t)frame->a2, (intmax_t)frame->a3);
	/* n32/n64 have a4-a7 argument registers; o32 has t4-t7 instead. */
#if defined(__mips_n32) || defined(__mips_n64)
	printf("\ta4: %#jx\ta5: %#jx\ta6: %#jx\ta7: %#jx\n",
	    (intmax_t)frame->a4, (intmax_t)frame->a5, (intmax_t)frame->a6, (intmax_t)frame->a7);

	printf("\tt0: %#jx\tt1: %#jx\tt2: %#jx\tt3: %#jx\n",
	    (intmax_t)frame->t0, (intmax_t)frame->t1, (intmax_t)frame->t2, (intmax_t)frame->t3);
#else
	printf("\tt0: %#jx\tt1: %#jx\tt2: %#jx\tt3: %#jx\n",
	    (intmax_t)frame->t0, (intmax_t)frame->t1, (intmax_t)frame->t2, (intmax_t)frame->t3);

	printf("\tt4: %#jx\tt5: %#jx\tt6: %#jx\tt7: %#jx\n",
	    (intmax_t)frame->t4, (intmax_t)frame->t5, (intmax_t)frame->t6, (intmax_t)frame->t7);
#endif
	printf("\tt8: %#jx\tt9: %#jx\ts0: %#jx\ts1: %#jx\n",
	    (intmax_t)frame->t8, (intmax_t)frame->t9, (intmax_t)frame->s0, (intmax_t)frame->s1);

	printf("\ts2: %#jx\ts3: %#jx\ts4: %#jx\ts5: %#jx\n",
	    (intmax_t)frame->s2, (intmax_t)frame->s3, (intmax_t)frame->s4, (intmax_t)frame->s5);

	printf("\ts6: %#jx\ts7: %#jx\tk0: %#jx\tk1: %#jx\n",
	    (intmax_t)frame->s6, (intmax_t)frame->s7, (intmax_t)frame->k0, (intmax_t)frame->k1);

	printf("\tgp: %#jx\tsp: %#jx\ts8: %#jx\tra: %#jx\n",
	    (intmax_t)frame->gp, (intmax_t)frame->sp, (intmax_t)frame->s8, (intmax_t)frame->ra);

	printf("\tsr: %#jx\tmullo: %#jx\tmulhi: %#jx\tbadvaddr: %#jx\n",
	    (intmax_t)frame->sr, (intmax_t)frame->mullo, (intmax_t)frame->mulhi, (intmax_t)frame->badvaddr);

#ifdef IC_REG
	printf("\tcause: %#jx\tpc: %#jx\tic: %#jx\n",
	    (intmax_t)frame->cause, (intmax_t)frame->pc, (intmax_t)frame->ic);
#else
	printf("\tcause: %#jx\tpc: %#jx\n",
	    (intmax_t)frame->cause, (intmax_t)frame->pc);
#endif
}
1410
1411 #endif
1412
1413
1414 static void
1415 get_mapping_info(vm_offset_t va, pd_entry_t **pdepp, pt_entry_t **ptepp)
1416 {
1417 pt_entry_t *ptep;
1418 pd_entry_t *pdep;
1419 struct proc *p = curproc;
1420
1421 pdep = (&(p->p_vmspace->vm_pmap.pm_segtab[(va >> SEGSHIFT) & (NPDEPG - 1)]));
1422 if (*pdep)
1423 ptep = pmap_pte(&p->p_vmspace->vm_pmap, va);
1424 else
1425 ptep = (pt_entry_t *)0;
1426
1427 *pdepp = pdep;
1428 *ptepp = ptep;
1429 }
1430
/*
 * Log diagnostic information about an illegal/reserved instruction
 * fault in the current thread: the faulting process identity, the
 * full trap frame, the page table entries covering the pc, and (when
 * readable) the instruction words around the pc.
 */
static void
log_illegal_instruction(const char *msg, struct trapframe *frame)
{
	pt_entry_t *ptep;
	pd_entry_t *pdep;
	unsigned int *addr;
	struct thread *td;
	struct proc *p;
	register_t pc;

	td = curthread;
	p = td->td_proc;

#ifdef SMP
	printf("cpuid = %d\n", PCPU_GET(cpuid));
#endif
	/* If the fault was in a branch delay slot, report the slot's pc. */
	pc = frame->pc + (DELAYBRANCH(frame->cause) ? 4 : 0);
	log(LOG_ERR, "%s: pid %d tid %ld (%s), uid %d: pc %#jx ra %#jx\n",
	    msg, p->p_pid, (long)td->td_tid, p->p_comm,
	    p->p_ucred ? p->p_ucred->cr_uid : -1,
	    (intmax_t)pc,
	    (intmax_t)frame->ra);

	/* log registers in trap frame */
	log_frame_dump(frame);

	get_mapping_info((vm_offset_t)pc, &pdep, &ptep);

	/*
	 * Dump a few words around the faulting instruction, if the address
	 * is valid.
	 */
	if (!(pc & 3) &&
	    useracc((caddr_t)(intptr_t)pc, sizeof(int) * 4, VM_PROT_READ)) {
		/* dump page table entry for faulting instruction */
		log(LOG_ERR, "Page table info for pc address %#jx: pde = %p, pte = %#jx\n",
		    (intmax_t)pc, (void *)(intptr_t)*pdep, (uintmax_t)(ptep ? *ptep : 0));

		addr = (unsigned int *)(intptr_t)pc;
		log(LOG_ERR, "Dumping 4 words starting at pc address %p: \n",
		    addr);
		log(LOG_ERR, "%08x %08x %08x %08x\n",
		    addr[0], addr[1], addr[2], addr[3]);
	} else {
		log(LOG_ERR, "pc address %#jx is inaccessible, pde = %p, pte = %#jx\n",
		    (intmax_t)pc, (void *)(intptr_t)*pdep, (uintmax_t)(ptep ? *ptep : 0));
	}
}
1479
/*
 * Log diagnostic information about a fatal page fault: the fault
 * direction (read/write), the faulting process identity, the trap
 * frame, the page table entries for both the pc and the bad address,
 * and (when readable) the instruction words around the pc.
 */
static void
log_bad_page_fault(char *msg, struct trapframe *frame, int trap_type)
{
	pt_entry_t *ptep;
	pd_entry_t *pdep;
	unsigned int *addr;
	struct thread *td;
	struct proc *p;
	char *read_or_write;
	register_t pc;

	/* Strip the user-mode flag so the switch sees the raw trap code. */
	trap_type &= ~T_USER;

	td = curthread;
	p = td->td_proc;

#ifdef SMP
	printf("cpuid = %d\n", PCPU_GET(cpuid));
#endif
	/* Classify the fault as a read or a write for the log message. */
	switch (trap_type) {
	case T_TLB_MOD:
	case T_TLB_ST_MISS:
	case T_ADDR_ERR_ST:
		read_or_write = "write";
		break;
	case T_TLB_LD_MISS:
	case T_ADDR_ERR_LD:
	case T_BUS_ERR_IFETCH:
		read_or_write = "read";
		break;
	default:
		read_or_write = "unknown";
	}

	/* If the fault was in a branch delay slot, report the slot's pc. */
	pc = frame->pc + (DELAYBRANCH(frame->cause) ? 4 : 0);
	log(LOG_ERR, "%s: pid %d tid %ld (%s), uid %d: pc %#jx got a %s fault "
	    "(type %#x) at %#jx\n",
	    msg, p->p_pid, (long)td->td_tid, p->p_comm,
	    p->p_ucred ? p->p_ucred->cr_uid : -1,
	    (intmax_t)pc,
	    read_or_write,
	    trap_type,
	    (intmax_t)frame->badvaddr);

	/* log registers in trap frame */
	log_frame_dump(frame);

	get_mapping_info((vm_offset_t)pc, &pdep, &ptep);

	/*
	 * Dump a few words around the faulting instruction, if the address
	 * is valid.
	 */
	if (!(pc & 3) && (pc != frame->badvaddr) &&
	    (trap_type != T_BUS_ERR_IFETCH) &&
	    useracc((caddr_t)(intptr_t)pc, sizeof(int) * 4, VM_PROT_READ)) {
		/* dump page table entry for faulting instruction */
		log(LOG_ERR, "Page table info for pc address %#jx: pde = %p, pte = %#jx\n",
		    (intmax_t)pc, (void *)(intptr_t)*pdep, (uintmax_t)(ptep ? *ptep : 0));

		addr = (unsigned int *)(intptr_t)pc;
		log(LOG_ERR, "Dumping 4 words starting at pc address %p: \n",
		    addr);
		log(LOG_ERR, "%08x %08x %08x %08x\n",
		    addr[0], addr[1], addr[2], addr[3]);
	} else {
		log(LOG_ERR, "pc address %#jx is inaccessible, pde = %p, pte = %#jx\n",
		    (intmax_t)pc, (void *)(intptr_t)*pdep, (uintmax_t)(ptep ? *ptep : 0));
	}

	/* Also show the mapping state of the address that actually faulted. */
	get_mapping_info((vm_offset_t)frame->badvaddr, &pdep, &ptep);
	log(LOG_ERR, "Page table info for bad address %#jx: pde = %p, pte = %#jx\n",
	    (intmax_t)frame->badvaddr, (void *)(intptr_t)*pdep, (uintmax_t)(ptep ? *ptep : 0));
}
1554
1555
1556 /*
1557 * Unaligned load/store emulation
1558 */
/*
 * Emulate an unaligned load or store by performing the access as a
 * sequence of narrower (byte / partial-word) accesses.
 *
 * 'frame' holds the faulting register state; 'mode' is VM_PROT_READ or
 * VM_PROT_WRITE; 'addr' is the misaligned target address; 'pc' is the
 * address of the faulting instruction.  Returns one of the
 * MIPS_*_ACCESS codes on success, or 0 when the instruction cannot be
 * emulated (unhandled opcode or inaccessible target).
 */
static int
mips_unaligned_load_store(struct trapframe *frame, int mode, register_t addr, register_t pc)
{
	register_t *reg = (register_t *) frame;
	u_int32_t inst = *((u_int32_t *)(intptr_t)pc);
	register_t value_msb, value;
	unsigned size;

	/*
	 * ADDR_ERR faults have higher priority than TLB
	 * Miss faults. Therefore, it is necessary to
	 * verify that the faulting address is a valid
	 * virtual address within the process' address space
	 * before trying to emulate the unaligned access.
	 */
	switch (MIPS_INST_OPCODE(inst)) {
	case OP_LHU: case OP_LH:
	case OP_SH:
		size = 2;
		break;
	case OP_LWU: case OP_LW:
	case OP_SW:
		size = 4;
		break;
	case OP_LD:
	case OP_SD:
		size = 8;
		break;
	default:
		printf("%s: unhandled opcode in address error: %#x\n", __func__, MIPS_INST_OPCODE(inst));
		return (0);
	}

	/*
	 * Probe size * 2 bytes from the rounded-down address so that the
	 * whole misaligned access, whichever bytes it straddles, is covered.
	 */
	if (!useracc((void *)rounddown2((vm_offset_t)addr, size), size * 2, mode))
		return (0);

	/*
	 * XXX
	 * Handle LL/SC LLD/SCD.
	 */
	switch (MIPS_INST_OPCODE(inst)) {
	case OP_LHU:
		/* Halfword load: two byte loads, MSB first (big-endian). */
		KASSERT(mode == VM_PROT_READ, ("access mode must be read for load instruction."));
		lbu_macro(value_msb, addr);
		addr += 1;
		lbu_macro(value, addr);
		value |= value_msb << 8;
		reg[MIPS_INST_RT(inst)] = value;
		return (MIPS_LHU_ACCESS);

	case OP_LH:
		/* Signed halfword: sign-extend via the first (MSB) byte. */
		KASSERT(mode == VM_PROT_READ, ("access mode must be read for load instruction."));
		lb_macro(value_msb, addr);
		addr += 1;
		lbu_macro(value, addr);
		value |= value_msb << 8;
		reg[MIPS_INST_RT(inst)] = value;
		return (MIPS_LH_ACCESS);

	case OP_LWU:
		/* Word load via LWL/LWR pair, then zero-extend. */
		KASSERT(mode == VM_PROT_READ, ("access mode must be read for load instruction."));
		lwl_macro(value, addr);
		addr += 3;
		lwr_macro(value, addr);
		value &= 0xffffffff;
		reg[MIPS_INST_RT(inst)] = value;
		return (MIPS_LWU_ACCESS);

	case OP_LW:
		KASSERT(mode == VM_PROT_READ, ("access mode must be read for load instruction."));
		lwl_macro(value, addr);
		addr += 3;
		lwr_macro(value, addr);
		reg[MIPS_INST_RT(inst)] = value;
		return (MIPS_LW_ACCESS);

#if defined(__mips_n32) || defined(__mips_n64)
	case OP_LD:
		/* Doubleword load via LDL/LDR pair (64-bit ABIs only). */
		KASSERT(mode == VM_PROT_READ, ("access mode must be read for load instruction."));
		ldl_macro(value, addr);
		addr += 7;
		ldr_macro(value, addr);
		reg[MIPS_INST_RT(inst)] = value;
		return (MIPS_LD_ACCESS);
#endif

	case OP_SH:
		/* Halfword store: two byte stores, MSB first. */
		KASSERT(mode == VM_PROT_WRITE, ("access mode must be write for store instruction."));
		value = reg[MIPS_INST_RT(inst)];
		value_msb = value >> 8;
		sb_macro(value_msb, addr);
		addr += 1;
		sb_macro(value, addr);
		return (MIPS_SH_ACCESS);

	case OP_SW:
		KASSERT(mode == VM_PROT_WRITE, ("access mode must be write for store instruction."));
		value = reg[MIPS_INST_RT(inst)];
		swl_macro(value, addr);
		addr += 3;
		swr_macro(value, addr);
		return (MIPS_SW_ACCESS);

#if defined(__mips_n32) || defined(__mips_n64)
	case OP_SD:
		KASSERT(mode == VM_PROT_WRITE, ("access mode must be write for store instruction."));
		value = reg[MIPS_INST_RT(inst)];
		sdl_macro(value, addr);
		addr += 7;
		sdr_macro(value, addr);
		return (MIPS_SD_ACCESS);
#endif
	}
	/*
	 * NOTE(review): on o32 builds OP_LD/OP_SD pass the size check above
	 * but are not handled in this switch, so they reach this panic —
	 * confirm that is intentional for 32-bit ABIs.
	 */
	panic("%s: should not be reached.", __func__);
}
1674
1675
/*
 * Rate-limiting state for the userland unaligned-access log messages
 * emitted by emulate_unaligned_access().
 * XXX TODO: SMP?  (this state is not protected against concurrent update)
 */
static struct timeval unaligned_lasterr;
static int unaligned_curerr;

/* Maximum unaligned-access log messages per second; tunable via sysctl. */
static int unaligned_pps_log_limit = 4;

SYSCTL_INT(_machdep, OID_AUTO, unaligned_log_pps_limit, CTLFLAG_RWTUN,
    &unaligned_pps_log_limit, 0,
    "limit number of userland unaligned log messages per second");
1687
1688 static int
1689 emulate_unaligned_access(struct trapframe *frame, int mode)
1690 {
1691 register_t pc;
1692 int access_type = 0;
1693 struct thread *td = curthread;
1694 struct proc *p = curproc;
1695
1696 pc = frame->pc + (DELAYBRANCH(frame->cause) ? 4 : 0);
1697
1698 /*
1699 * Fall through if it's instruction fetch exception
1700 */
1701 if (!((pc & 3) || (pc == frame->badvaddr))) {
1702
1703 /*
1704 * Handle unaligned load and store
1705 */
1706
1707 /*
1708 * Return access type if the instruction was emulated.
1709 * Otherwise restore pc and fall through.
1710 */
1711 access_type = mips_unaligned_load_store(frame,
1712 mode, frame->badvaddr, pc);
1713
1714 if (access_type) {
1715 if (DELAYBRANCH(frame->cause))
1716 frame->pc = MipsEmulateBranch(frame, frame->pc,
1717 0, 0);
1718 else
1719 frame->pc += 4;
1720
1721 if (ppsratecheck(&unaligned_lasterr,
1722 &unaligned_curerr, unaligned_pps_log_limit)) {
1723 /* XXX TODO: keep global/tid/pid counters? */
1724 log(LOG_INFO,
1725 "Unaligned %s: pid=%ld (%s), tid=%ld, "
1726 "pc=%#jx, badvaddr=%#jx\n",
1727 access_name[access_type - 1],
1728 (long) p->p_pid,
1729 p->p_comm,
1730 (long) td->td_tid,
1731 (intmax_t)pc,
1732 (intmax_t)frame->badvaddr);
1733 }
1734 }
1735 }
1736 return access_type;
1737 }
Cache object: 08a6f8cbe148d3c2089ffa789a017e82
|