FreeBSD/Linux Kernel Cross Reference
sys/arm/arm/trap.c
1 /* $NetBSD: fault.c,v 1.45 2003/11/20 14:44:36 scw Exp $ */
2
3 /*-
4 * Copyright 2004 Olivier Houchard
5 * Copyright 2003 Wasabi Systems, Inc.
6 * All rights reserved.
7 *
8 * Written by Steve C. Woodford for Wasabi Systems, Inc.
9 *
10 * Redistribution and use in source and binary forms, with or without
11 * modification, are permitted provided that the following conditions
12 * are met:
13 * 1. Redistributions of source code must retain the above copyright
14 * notice, this list of conditions and the following disclaimer.
15 * 2. Redistributions in binary form must reproduce the above copyright
16 * notice, this list of conditions and the following disclaimer in the
17 * documentation and/or other materials provided with the distribution.
18 * 3. All advertising materials mentioning features or use of this software
19 * must display the following acknowledgement:
20 * This product includes software developed for the NetBSD Project by
21 * Wasabi Systems, Inc.
22 * 4. The name of Wasabi Systems, Inc. may not be used to endorse
23 * or promote products derived from this software without specific prior
24 * written permission.
25 *
26 * THIS SOFTWARE IS PROVIDED BY WASABI SYSTEMS, INC. ``AS IS'' AND
27 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
28 * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
29 * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL WASABI SYSTEMS, INC
30 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
31 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
32 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
33 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
34 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
35 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
36 * POSSIBILITY OF SUCH DAMAGE.
37 */
38 /*-
39 * Copyright (c) 1994-1997 Mark Brinicombe.
40 * Copyright (c) 1994 Brini.
41 * All rights reserved.
42 *
43 * This code is derived from software written for Brini by Mark Brinicombe
44 *
45 * Redistribution and use in source and binary forms, with or without
46 * modification, are permitted provided that the following conditions
47 * are met:
48 * 1. Redistributions of source code must retain the above copyright
49 * notice, this list of conditions and the following disclaimer.
50 * 2. Redistributions in binary form must reproduce the above copyright
51 * notice, this list of conditions and the following disclaimer in the
52 * documentation and/or other materials provided with the distribution.
53 * 3. All advertising materials mentioning features or use of this software
54 * must display the following acknowledgement:
55 * This product includes software developed by Brini.
56 * 4. The name of the company nor the name of the author may be used to
57 * endorse or promote products derived from this software without specific
58 * prior written permission.
59 *
60 * THIS SOFTWARE IS PROVIDED BY BRINI ``AS IS'' AND ANY EXPRESS OR IMPLIED
61 * WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF
62 * MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
63 * IN NO EVENT SHALL BRINI OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT,
64 * INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
65 * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
66 * SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
67 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
68 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
69 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
70 * SUCH DAMAGE.
71 *
72 * RiscBSD kernel project
73 *
74 * fault.c
75 *
76 * Fault handlers
77 *
78 * Created : 28/11/94
79 */
80
81
82 #include "opt_ktrace.h"
83
84 #include <sys/cdefs.h>
85 __FBSDID("$FreeBSD: releng/6.2/sys/arm/arm/trap.c 159888 2006-06-23 17:39:57Z cognet $");
86
87 #include <sys/types.h>
88
89 #include <sys/param.h>
90 #include <sys/systm.h>
91 #include <sys/proc.h>
92 #include <sys/kernel.h>
93 #include <sys/lock.h>
94 #include <sys/mutex.h>
95 #include <sys/syscall.h>
96 #include <sys/sysent.h>
97 #include <sys/signalvar.h>
98 #include <sys/ktr.h>
99 #ifdef KTRACE
100 #include <sys/uio.h>
101 #include <sys/ktrace.h>
102 #endif
103 #include <sys/ptrace.h>
104 #include <sys/pioctl.h>
105
106 #include <vm/vm.h>
107 #include <vm/pmap.h>
108 #include <vm/vm_kern.h>
109 #include <vm/vm_map.h>
110 #include <vm/vm_extern.h>
111
112 #include <machine/cpuconf.h>
113 #include <machine/vmparam.h>
114 #include <machine/frame.h>
115 #include <machine/katelib.h>
116 #include <machine/cpu.h>
117 #include <machine/intr.h>
118 #include <machine/pcb.h>
119 #include <machine/proc.h>
120 #include <machine/swi.h>
121
122 #ifdef KDB
123 #include <sys/kdb.h>
124 #endif
125
126
127 void swi_handler(trapframe_t *);
128 void undefinedinstruction(trapframe_t *);
129
130 #include <machine/disassem.h>
131 #include <machine/machdep.h>
132
133 extern char fusubailout[];
134
135 #ifdef DEBUG
136 int last_fault_code; /* For the benefit of pmap_fault_fixup() */
137 #endif
138
139 #if defined(CPU_ARM7TDMI)
140 /* These CPUs may need data/prefetch abort fixups */
141 #define CPU_ABORT_FIXUP_REQUIRED
142 #endif
143
/*
 * Description of the signal (number + code) an abort handler wants
 * delivered to the faulting thread via call_trapsignal().
 */
struct ksig {
	int signb;	/* signal number (SIGSEGV, SIGBUS, SIGILL, ...) */
	u_long code;	/* signal code passed to trapsignal() */
};

/*
 * One entry per FSR fault type: a specific handler, or NULL meaning
 * data_abort_handler() services the fault through the VM system.
 * 'desc' is used for diagnostic output by dab_fatal().
 */
struct data_abort {
	int (*func)(trapframe_t *, u_int, u_int, struct thread *, struct ksig *);
	const char *desc;
};

/* Handlers for the "special" (non-VM) data abort types. */
static int dab_fatal(trapframe_t *, u_int, u_int, struct thread *, struct ksig *);
static int dab_align(trapframe_t *, u_int, u_int, struct thread *, struct ksig *);
static int dab_buserr(trapframe_t *, u_int, u_int, struct thread *, struct ksig *);

/*
 * Dispatch table indexed by (fsr & FAULT_TYPE_MASK).  Entries with a
 * NULL func are the translation/domain/permission faults resolved by
 * the generic page-fault path in data_abort_handler().
 */
static const struct data_abort data_aborts[] = {
	{dab_fatal,	"Vector Exception"},
	{dab_align,	"Alignment Fault 1"},
	{dab_fatal,	"Terminal Exception"},
	{dab_align,	"Alignment Fault 3"},
	{dab_buserr,	"External Linefetch Abort (S)"},
	{NULL,		"Translation Fault (S)"},
	{dab_buserr,	"External Linefetch Abort (P)"},
	{NULL,		"Translation Fault (P)"},
	{dab_buserr,	"External Non-Linefetch Abort (S)"},
	{NULL,		"Domain Fault (S)"},
	{dab_buserr,	"External Non-Linefetch Abort (P)"},
	{NULL,		"Domain Fault (P)"},
	{dab_buserr,	"External Translation Abort (L1)"},
	{NULL,		"Permission Fault (S)"},
	{dab_buserr,	"External Translation Abort (L2)"},
	{NULL,		"Permission Fault (P)"}
};
175
/* Determine if a fault came from user mode */
#define TRAP_USERMODE(tf)	((tf->tf_spsr & PSR_MODE) == PSR_USR32_MODE)

/*
 * Determine if 'x' is a permission fault: true when the FSR status
 * field decodes to FAULT_PERM_S or FAULT_PERM_P (section/page).
 */
#define IS_PERMISSION_FAULT(x)					\
	(((1 << ((x) & FAULT_TYPE_MASK)) &			\
	  ((1 << FAULT_PERM_P) | (1 << FAULT_PERM_S))) != 0)
183
/*
 * Thin wrapper around trapsignal(); kept as a single choke point so
 * signal delivery from the abort handlers is easy to trace/instrument.
 */
static __inline void
call_trapsignal(struct thread *td, int sig, u_long code)
{

	trapsignal(td, sig, code);
}
190
191 static __inline int
192 data_abort_fixup(trapframe_t *tf, u_int fsr, u_int far, struct thread *td, struct ksig *ksig)
193 {
194 #ifdef CPU_ABORT_FIXUP_REQUIRED
195 int error;
196
197 /* Call the cpu specific data abort fixup routine */
198 error = cpu_dataabt_fixup(tf);
199 if (__predict_true(error != ABORT_FIXUP_FAILED))
200 return (error);
201
202 /*
203 * Oops, couldn't fix up the instruction
204 */
205 printf("data_abort_fixup: fixup for %s mode data abort failed.\n",
206 TRAP_USERMODE(tf) ? "user" : "kernel");
207 printf("pc = 0x%08x, opcode 0x%08x, insn = ", tf->tf_pc,
208 *((u_int *)tf->tf_pc));
209 disassemble(tf->tf_pc);
210
211 /* Die now if this happened in kernel mode */
212 if (!TRAP_USERMODE(tf))
213 dab_fatal(tf, fsr, far, td, NULL, ksig);
214
215 return (error);
216 #else
217 return (ABORT_FIXUP_OK);
218 #endif /* CPU_ABORT_FIXUP_REQUIRED */
219 }
220
221 void
222 data_abort_handler(trapframe_t *tf)
223 {
224 struct vm_map *map;
225 struct pcb *pcb;
226 struct thread *td;
227 u_int user, far, fsr;
228 vm_prot_t ftype;
229 void *onfault;
230 vm_offset_t va;
231 u_int sticks = 0;
232 int error = 0;
233 struct ksig ksig;
234 struct proc *p;
235
236
237 /* Grab FAR/FSR before enabling interrupts */
238 far = cpu_faultaddress();
239 fsr = cpu_faultstatus();
240 #if 0
241 printf("data abort: %p (from %p %p)\n", (void*)far, (void*)tf->tf_pc,
242 (void*)tf->tf_svc_lr);
243 #endif
244
245 /* Update vmmeter statistics */
246 #if 0
247 vmexp.traps++;
248 #endif
249
250 td = curthread;
251 p = td->td_proc;
252
253 PCPU_LAZY_INC(cnt.v_trap);
254 /* Data abort came from user mode? */
255 user = TRAP_USERMODE(tf);
256
257 if (user) {
258 sticks = td->td_sticks; td->td_frame = tf;
259 if (td->td_ucred != td->td_proc->p_ucred)
260 cred_update_thread(td);
261 if (td->td_pflags & TDP_SA)
262 thread_user_enter(td);
263
264 }
265 /* Grab the current pcb */
266 pcb = td->td_pcb;
267 /* Re-enable interrupts if they were enabled previously */
268 if (td->td_critnest == 0) {
269 if (__predict_true(tf->tf_spsr & I32_bit) == 0)
270 enable_interrupts(I32_bit);
271 if (__predict_true(tf->tf_spsr & F32_bit) == 0)
272 enable_interrupts(F32_bit);
273 }
274
275 /* Invoke the appropriate handler, if necessary */
276 if (__predict_false(data_aborts[fsr & FAULT_TYPE_MASK].func != NULL)) {
277 if ((data_aborts[fsr & FAULT_TYPE_MASK].func)(tf, fsr, far,
278 td, &ksig)) {
279 goto do_trapsignal;
280 }
281 goto out;
282 }
283
284 /*
285 * At this point, we're dealing with one of the following data aborts:
286 *
287 * FAULT_TRANS_S - Translation -- Section
288 * FAULT_TRANS_P - Translation -- Page
289 * FAULT_DOMAIN_S - Domain -- Section
290 * FAULT_DOMAIN_P - Domain -- Page
291 * FAULT_PERM_S - Permission -- Section
292 * FAULT_PERM_P - Permission -- Page
293 *
294 * These are the main virtual memory-related faults signalled by
295 * the MMU.
296 */
297
298 /* fusubailout is used by [fs]uswintr to avoid page faulting */
299 if (__predict_false(pcb->pcb_onfault == fusubailout)) {
300 tf->tf_r0 = EFAULT;
301 tf->tf_pc = (register_t)(intptr_t) pcb->pcb_onfault;
302 return;
303 }
304
305 /*
306 * Make sure the Program Counter is sane. We could fall foul of
307 * someone executing Thumb code, in which case the PC might not
308 * be word-aligned. This would cause a kernel alignment fault
309 * further down if we have to decode the current instruction.
310 * XXX: It would be nice to be able to support Thumb at some point.
311 */
312 if (__predict_false((tf->tf_pc & 3) != 0)) {
313 if (user) {
314 /*
315 * Give the user an illegal instruction signal.
316 */
317 /* Deliver a SIGILL to the process */
318 ksig.signb = SIGILL;
319 ksig.code = 0;
320 goto do_trapsignal;
321 }
322
323 /*
324 * The kernel never executes Thumb code.
325 */
326 printf("\ndata_abort_fault: Misaligned Kernel-mode "
327 "Program Counter\n");
328 dab_fatal(tf, fsr, far, td, &ksig);
329 }
330
331 /* See if the cpu state needs to be fixed up */
332 switch (data_abort_fixup(tf, fsr, far, td, &ksig)) {
333 case ABORT_FIXUP_RETURN:
334 return;
335 case ABORT_FIXUP_FAILED:
336 /* Deliver a SIGILL to the process */
337 ksig.signb = SIGILL;
338 ksig.code = 0;
339 goto do_trapsignal;
340 default:
341 break;
342 }
343
344 va = trunc_page((vm_offset_t)far);
345
346 /*
347 * It is only a kernel address space fault iff:
348 * 1. user == 0 and
349 * 2. pcb_onfault not set or
350 * 3. pcb_onfault set and not LDRT/LDRBT/STRT/STRBT instruction.
351 */
352 if (user == 0 && (va >= VM_MIN_KERNEL_ADDRESS ||
353 (va < VM_MIN_ADDRESS && vector_page == ARM_VECTORS_LOW)) &&
354 __predict_true((pcb->pcb_onfault == NULL ||
355 (ReadWord(tf->tf_pc) & 0x05200000) != 0x04200000))) {
356 map = kernel_map;
357
358 /* Was the fault due to the FPE/IPKDB ? */
359 if (__predict_false((tf->tf_spsr & PSR_MODE)==PSR_UND32_MODE)) {
360
361 /*
362 * Force exit via userret()
363 * This is necessary as the FPE is an extension to
364 * userland that actually runs in a priveledged mode
365 * but uses USR mode permissions for its accesses.
366 */
367 user = 1;
368 ksig.signb = SIGSEGV;
369 ksig.code = 0;
370 goto do_trapsignal;
371 }
372 } else {
373 map = &td->td_proc->p_vmspace->vm_map;
374 }
375
376 /*
377 * We need to know whether the page should be mapped
378 * as R or R/W. The MMU does not give us the info as
379 * to whether the fault was caused by a read or a write.
380 *
381 * However, we know that a permission fault can only be
382 * the result of a write to a read-only location, so
383 * we can deal with those quickly.
384 *
385 * Otherwise we need to disassemble the instruction
386 * responsible to determine if it was a write.
387 */
388 if (IS_PERMISSION_FAULT(fsr)) {
389 ftype = VM_PROT_WRITE;
390 } else {
391 u_int insn = ReadWord(tf->tf_pc);
392
393 if (((insn & 0x0c100000) == 0x04000000) || /* STR/STRB */
394 ((insn & 0x0e1000b0) == 0x000000b0) || /* STRH/STRD */
395 ((insn & 0x0a100000) == 0x08000000)) /* STM/CDT */
396 {
397 ftype = VM_PROT_WRITE;
398 }
399 else
400 if ((insn & 0x0fb00ff0) == 0x01000090) /* SWP */
401 ftype = VM_PROT_READ | VM_PROT_WRITE;
402 else
403 ftype = VM_PROT_READ;
404 }
405
406 /*
407 * See if the fault is as a result of ref/mod emulation,
408 * or domain mismatch.
409 */
410 #ifdef DEBUG
411 last_fault_code = fsr;
412 #endif
413 if (pmap_fault_fixup(vmspace_pmap(td->td_proc->p_vmspace), va, ftype,
414 user)) {
415 goto out;
416 }
417
418 onfault = pcb->pcb_onfault;
419 pcb->pcb_onfault = NULL;
420 if (map != kernel_map) {
421 PROC_LOCK(p);
422 p->p_lock++;
423 PROC_UNLOCK(p);
424 }
425 error = vm_fault(map, va, ftype, (ftype & VM_PROT_WRITE) ?
426 VM_FAULT_DIRTY : VM_FAULT_NORMAL);
427 pcb->pcb_onfault = onfault;
428
429 if (map != kernel_map) {
430 PROC_LOCK(p);
431 p->p_lock--;
432 PROC_UNLOCK(p);
433 }
434 if (__predict_true(error == 0))
435 goto out;
436 if (user == 0) {
437 if (pcb->pcb_onfault) {
438 tf->tf_r0 = error;
439 tf->tf_pc = (register_t)(intptr_t) pcb->pcb_onfault;
440 return;
441 }
442
443 printf("\nvm_fault(%p, %x, %x, 0) -> %x\n", map, va, ftype,
444 error);
445 dab_fatal(tf, fsr, far, td, &ksig);
446 }
447
448
449 if (error == ENOMEM) {
450 printf("VM: pid %d (%s), uid %d killed: "
451 "out of swap\n", td->td_proc->p_pid, td->td_proc->p_comm,
452 (td->td_proc->p_ucred) ?
453 td->td_proc->p_ucred->cr_uid : -1);
454 ksig.signb = SIGKILL;
455 } else {
456 ksig.signb = SIGSEGV;
457 }
458 ksig.code = 0;
459 do_trapsignal:
460 call_trapsignal(td, ksig.signb, ksig.code);
461 out:
462 /* If returning to user mode, make sure to invoke userret() */
463 if (user)
464 userret(td, tf, sticks);
465 }
466
/*
 * dab_fatal() handles the following data aborts:
 *
 *  FAULT_WRTBUF_0 - Vector Exception
 *  FAULT_WRTBUF_1 - Terminal Exception
 *
 * We should never see these on a properly functioning system.
 *
 * This function is also called by the other handlers if they
 * detect a fatal problem.
 *
 * Dumps the trapframe registers and panics; never returns.
 *
 * Note: If 'td' is NULL, we assume we're dealing with a prefetch abort
 * (FSR/FAR are then meaningless and 'far' carries the faulting PC).
 */
static int
dab_fatal(trapframe_t *tf, u_int fsr, u_int far, struct thread *td, struct ksig *ksig)
{
	const char *mode;

	mode = TRAP_USERMODE(tf) ? "user" : "kernel";

	if (td != NULL) {
		printf("Fatal %s mode data abort: '%s'\n", mode,
		    data_aborts[fsr & FAULT_TYPE_MASK].desc);
		printf("trapframe: %p\nFSR=%08x, FAR=", tf, fsr);
		/* FAR holds garbage for imprecise (XScale) aborts. */
		if ((fsr & FAULT_IMPRECISE) == 0)
			printf("%08x, ", far);
		else
			printf("Invalid, ");
		printf("spsr=%08x\n", tf->tf_spsr);
	} else {
		printf("Fatal %s mode prefetch abort at 0x%08x\n",
		    mode, tf->tf_pc);
		printf("trapframe: %p, spsr=%08x\n", tf, tf->tf_spsr);
	}

	printf("r0 =%08x, r1 =%08x, r2 =%08x, r3 =%08x\n",
	    tf->tf_r0, tf->tf_r1, tf->tf_r2, tf->tf_r3);
	printf("r4 =%08x, r5 =%08x, r6 =%08x, r7 =%08x\n",
	    tf->tf_r4, tf->tf_r5, tf->tf_r6, tf->tf_r7);
	printf("r8 =%08x, r9 =%08x, r10=%08x, r11=%08x\n",
	    tf->tf_r8, tf->tf_r9, tf->tf_r10, tf->tf_r11);
	printf("r12=%08x, ", tf->tf_r12);

	/* SP/LR are banked: pick the set belonging to the faulting mode. */
	if (TRAP_USERMODE(tf))
		printf("usp=%08x, ulr=%08x",
		    tf->tf_usr_sp, tf->tf_usr_lr);
	else
		printf("ssp=%08x, slr=%08x",
		    tf->tf_svc_sp, tf->tf_svc_lr);
	printf(", pc =%08x\n\n", tf->tf_pc);

#ifdef KDB
	/* Give the debugger a chance to look around before we die. */
	kdb_trap(fsr, 0, tf);
#endif
	panic("Fatal abort");
	/*NOTREACHED*/
}
524
/*
 * dab_align() handles the following data aborts:
 *
 *  FAULT_ALIGN_0 - Alignment fault 1
 *  FAULT_ALIGN_1 - Alignment fault 3
 *
 * These faults are fatal if they happen in kernel mode. Otherwise, we
 * deliver a bus error to the process.
 */
static int
dab_align(trapframe_t *tf, u_int fsr, u_int far, struct thread *td, struct ksig *ksig)
{

	/* Alignment faults are always fatal if they occur in kernel mode */
	if (!TRAP_USERMODE(tf)) {
		/* No onfault handler registered: nothing to recover to. */
		if (!td || !td->td_pcb->pcb_onfault)
			dab_fatal(tf, fsr, far, td, ksig);
		/* Resume at the onfault handler with r0 = EFAULT. */
		tf->tf_r0 = EFAULT;
		tf->tf_pc = (int)td->td_pcb->pcb_onfault;
		return (0);
	}

	/* pcb_onfault *must* be NULL at this point */

	/* See if the cpu state needs to be fixed up */
	(void) data_abort_fixup(tf, fsr, far, td, ksig);

	/* Deliver a bus error signal to the process */
	ksig->code = 0;
	ksig->signb = SIGBUS;
	td->td_frame = tf;

	/* Non-zero tells the caller to deliver the signal in *ksig. */
	return (1);
}
559
/*
 * dab_buserr() handles the following data aborts:
 *
 *  FAULT_BUSERR_0 - External Abort on Linefetch -- Section
 *  FAULT_BUSERR_1 - External Abort on Linefetch -- Page
 *  FAULT_BUSERR_2 - External Abort on Non-linefetch -- Section
 *  FAULT_BUSERR_3 - External Abort on Non-linefetch -- Page
 *  FAULT_BUSTRNL1 - External abort on Translation -- Level 1
 *  FAULT_BUSTRNL2 - External abort on Translation -- Level 2
 *
 * If pcb_onfault is set, flag the fault and return to the handler.
 * If the fault occurred in user mode, give the process a SIGBUS.
 *
 * Note: On XScale, FAULT_BUSERR_0, FAULT_BUSERR_1, and FAULT_BUSERR_2
 * can be flagged as imprecise in the FSR. This causes a real headache
 * since some of the machine state is lost. In this case, tf->tf_pc
 * may not actually point to the offending instruction. In fact, if
 * we've taken a double abort fault, it generally points somewhere near
 * the top of "data_abort_entry" in exception.S.
 *
 * In all other cases, these data aborts are considered fatal.
 */
static int
dab_buserr(trapframe_t *tf, u_int fsr, u_int far, struct thread *td, struct ksig *ksig)
{
	struct pcb *pcb = td->td_pcb;

#ifdef __XSCALE__
	if ((fsr & FAULT_IMPRECISE) != 0 &&
	    (tf->tf_spsr & PSR_MODE) == PSR_ABT32_MODE) {
		/*
		 * Oops, an imprecise, double abort fault. We've lost the
		 * r14_abt/spsr_abt values corresponding to the original
		 * abort, and the spsr saved in the trapframe indicates
		 * ABT mode.
		 */
		tf->tf_spsr &= ~PSR_MODE;

		/*
		 * We use a simple heuristic to determine if the double abort
		 * happened as a result of a kernel or user mode access.
		 * If the current trapframe is at the top of the kernel stack,
		 * the fault _must_ have come from user mode.
		 */
		if (tf != ((trapframe_t *)pcb->un_32.pcb32_sp) - 1) {
			/*
			 * Kernel mode. We're either about to die a
			 * spectacular death, or pcb_onfault will come
			 * to our rescue. Either way, the current value
			 * of tf->tf_pc is irrelevant.
			 */
			tf->tf_spsr |= PSR_SVC32_MODE;
			if (pcb->pcb_onfault == NULL)
				printf("\nKernel mode double abort!\n");
		} else {
			/*
			 * User mode. We've lost the program counter at the
			 * time of the fault (not that it was accurate anyway;
			 * it's not called an imprecise fault for nothing).
			 * About all we can do is copy r14_usr to tf_pc and
			 * hope for the best. The process is about to get a
			 * SIGBUS, so it's probably history anyway.
			 */
			tf->tf_spsr |= PSR_USR32_MODE;
			tf->tf_pc = tf->tf_usr_lr;
		}
	}

	/* FAR is invalid for imprecise exceptions */
	if ((fsr & FAULT_IMPRECISE) != 0)
		far = 0;
#endif /* __XSCALE__ */

	/* Recover via the registered onfault handler, r0 = EFAULT. */
	if (pcb->pcb_onfault) {
		tf->tf_r0 = EFAULT;
		tf->tf_pc = (register_t)(intptr_t) pcb->pcb_onfault;
		return (0);
	}

	/* See if the cpu state needs to be fixed up */
	(void) data_abort_fixup(tf, fsr, far, td, ksig);

	/*
	 * At this point, if the fault happened in kernel mode, we're toast
	 */
	if (!TRAP_USERMODE(tf))
		dab_fatal(tf, fsr, far, td, ksig);

	/* Deliver a bus error signal to the process */
	ksig->signb = SIGBUS;
	ksig->code = 0;
	td->td_frame = tf;

	/* Non-zero tells the caller to deliver the signal in *ksig. */
	return (1);
}
655
/*
 * Run the CPU-specific prefetch abort fixup (only needed on cores
 * such as the ARM7TDMI).  Returns one of the ABORT_FIXUP_* codes;
 * on kernel-mode fixup failure this does not return (dab_fatal
 * panics).  'td' is passed as NULL to dab_fatal() to signal a
 * prefetch abort (see dab_fatal's note).
 */
static __inline int
prefetch_abort_fixup(trapframe_t *tf, struct ksig *ksig)
{
#ifdef CPU_ABORT_FIXUP_REQUIRED
	int error;

	/* Call the cpu specific prefetch abort fixup routine */
	error = cpu_prefetchabt_fixup(tf);
	if (__predict_true(error != ABORT_FIXUP_FAILED))
		return (error);

	/*
	 * Oops, couldn't fix up the instruction
	 */
	printf(
	    "prefetch_abort_fixup: fixup for %s mode prefetch abort failed.\n",
	    TRAP_USERMODE(tf) ? "user" : "kernel");
	printf("pc = 0x%08x, opcode 0x%08x, insn = ", tf->tf_pc,
	    *((u_int *)tf->tf_pc));
	disassemble(tf->tf_pc);

	/* Die now if this happened in kernel mode */
	if (!TRAP_USERMODE(tf))
		dab_fatal(tf, 0, tf->tf_pc, NULL, ksig);

	return (error);
#else
	return (ABORT_FIXUP_OK);
#endif /* CPU_ABORT_FIXUP_REQUIRED */
}
686
687 /*
688 * void prefetch_abort_handler(trapframe_t *tf)
689 *
690 * Abort handler called when instruction execution occurs at
691 * a non existent or restricted (access permissions) memory page.
692 * If the address is invalid and we were in SVC mode then panic as
693 * the kernel should never prefetch abort.
694 * If the address is invalid and the page is mapped then the user process
695 * does no have read permission so send it a signal.
696 * Otherwise fault the page in and try again.
697 */
698 void
699 prefetch_abort_handler(trapframe_t *tf)
700 {
701 struct thread *td;
702 struct proc * p;
703 struct vm_map *map;
704 vm_offset_t fault_pc, va;
705 int error = 0;
706 u_int sticks = 0;
707 struct ksig ksig;
708
709
710 #if 0
711 /* Update vmmeter statistics */
712 uvmexp.traps++;
713 #endif
714 #if 0
715 printf("prefetch abort handler: %p %p\n", (void*)tf->tf_pc,
716 (void*)tf->tf_usr_lr);
717 #endif
718
719 td = curthread;
720 p = td->td_proc;
721 PCPU_LAZY_INC(cnt.v_trap);
722
723 if (TRAP_USERMODE(tf)) {
724 td->td_frame = tf;
725 if (td->td_ucred != td->td_proc->p_ucred)
726 cred_update_thread(td);
727 if (td->td_proc->p_flag & P_SA)
728 thread_user_enter(td);
729 }
730 fault_pc = tf->tf_pc;
731 if (td->td_critnest == 0) {
732 if (__predict_true(tf->tf_spsr & I32_bit) == 0)
733 enable_interrupts(I32_bit);
734 if (__predict_true(tf->tf_spsr & F32_bit) == 0)
735 enable_interrupts(F32_bit);
736 }
737
738
739 /* See if the cpu state needs to be fixed up */
740 switch (prefetch_abort_fixup(tf, &ksig)) {
741 case ABORT_FIXUP_RETURN:
742 return;
743 case ABORT_FIXUP_FAILED:
744 /* Deliver a SIGILL to the process */
745 ksig.signb = SIGILL;
746 ksig.code = 0;
747 td->td_frame = tf;
748 goto do_trapsignal;
749 default:
750 break;
751 }
752
753 /* Prefetch aborts cannot happen in kernel mode */
754 if (__predict_false(!TRAP_USERMODE(tf)))
755 dab_fatal(tf, 0, tf->tf_pc, NULL, &ksig);
756 sticks = td->td_sticks;
757
758
759 /* Ok validate the address, can only execute in USER space */
760 if (__predict_false(fault_pc >= VM_MAXUSER_ADDRESS ||
761 (fault_pc < VM_MIN_ADDRESS && vector_page == ARM_VECTORS_LOW))) {
762 ksig.signb = SIGSEGV;
763 ksig.code = 0;
764 goto do_trapsignal;
765 }
766
767 map = &td->td_proc->p_vmspace->vm_map;
768 va = trunc_page(fault_pc);
769
770 /*
771 * See if the pmap can handle this fault on its own...
772 */
773 #ifdef DEBUG
774 last_fault_code = -1;
775 #endif
776 if (pmap_fault_fixup(map->pmap, va, VM_PROT_READ, 1))
777 goto out;
778
779 if (map != kernel_map) {
780 PROC_LOCK(p);
781 p->p_lock++;
782 PROC_UNLOCK(p);
783 }
784
785 error = vm_fault(map, va, VM_PROT_READ | VM_PROT_EXECUTE,
786 VM_FAULT_NORMAL);
787 if (map != kernel_map) {
788 PROC_LOCK(p);
789 p->p_lock--;
790 PROC_UNLOCK(p);
791 }
792
793 if (__predict_true(error == 0))
794 goto out;
795
796 if (error == ENOMEM) {
797 printf("VM: pid %d (%s), uid %d killed: "
798 "out of swap\n", td->td_proc->p_pid, td->td_proc->p_comm,
799 (td->td_proc->p_ucred) ?
800 td->td_proc->p_ucred->cr_uid : -1);
801 ksig.signb = SIGKILL;
802 } else {
803 ksig.signb = SIGSEGV;
804 }
805 ksig.code = 0;
806
807 do_trapsignal:
808 call_trapsignal(td, ksig.signb, ksig.code);
809
810 out:
811 userret(td, tf, sticks);
812
813 }
814
815 extern int badaddr_read_1(const uint8_t *, uint8_t *);
816 extern int badaddr_read_2(const uint16_t *, uint16_t *);
817 extern int badaddr_read_4(const uint32_t *, uint32_t *);
818 /*
819 * Tentatively read an 8, 16, or 32-bit value from 'addr'.
820 * If the read succeeds, the value is written to 'rptr' and zero is returned.
821 * Else, return EFAULT.
822 */
823 int
824 badaddr_read(void *addr, size_t size, void *rptr)
825 {
826 union {
827 uint8_t v1;
828 uint16_t v2;
829 uint32_t v4;
830 } u;
831 int rv;
832
833 cpu_drain_writebuf();
834
835 /* Read from the test address. */
836 switch (size) {
837 case sizeof(uint8_t):
838 rv = badaddr_read_1(addr, &u.v1);
839 if (rv == 0 && rptr)
840 *(uint8_t *) rptr = u.v1;
841 break;
842
843 case sizeof(uint16_t):
844 rv = badaddr_read_2(addr, &u.v2);
845 if (rv == 0 && rptr)
846 *(uint16_t *) rptr = u.v2;
847 break;
848
849 case sizeof(uint32_t):
850 rv = badaddr_read_4(addr, &u.v4);
851 if (rv == 0 && rptr)
852 *(uint32_t *) rptr = u.v4;
853 break;
854
855 default:
856 panic("badaddr: invalid size (%lu)", (u_long) size);
857 }
858
859 /* Return EFAULT if the address was invalid, else zero */
860 return (rv);
861 }
862
863 #define MAXARGS 8
864 static void
865 syscall(struct thread *td, trapframe_t *frame, u_int32_t insn)
866 {
867 struct proc *p = td->td_proc;
868 int code, error;
869 u_int nap, nargs;
870 register_t *ap, *args, copyargs[MAXARGS];
871 struct sysent *callp;
872 int locked = 0;
873 u_int sticks = 0;
874
875 PCPU_LAZY_INC(cnt.v_syscall);
876 sticks = td->td_sticks;
877 if (td->td_ucred != td->td_proc->p_ucred)
878 cred_update_thread(td);
879 switch (insn & SWI_OS_MASK) {
880 case 0: /* XXX: we need our own one. */
881 nap = 4;
882 break;
883 default:
884 trapsignal(td, SIGILL, 0);
885 userret(td, frame, td->td_sticks);
886 return;
887 }
888 code = insn & 0x000fffff;
889 sticks = td->td_sticks;
890 ap = &frame->tf_r0;
891 if (code == SYS_syscall) {
892 code = *ap++;
893
894 nap--;
895 } else if (code == SYS___syscall) {
896 code = *ap++;
897 nap -= 2;
898 ap++;
899 }
900 if (p->p_sysent->sv_mask)
901 code &= p->p_sysent->sv_mask;
902 if (code >= p->p_sysent->sv_size)
903 callp = &p->p_sysent->sv_table[0];
904 else
905 callp = &p->p_sysent->sv_table[code];
906 nargs = callp->sy_narg & SYF_ARGMASK;
907 memcpy(copyargs, ap, nap * sizeof(register_t));
908 if (nargs > nap) {
909 error = copyin((void *)frame->tf_usr_sp, copyargs + nap,
910 (nargs - nap) * sizeof(register_t));
911 if (error)
912 goto bad;
913 }
914 args = copyargs;
915 error = 0;
916 #ifdef KTRACE
917 if (KTRPOINT(td, KTR_SYSCALL))
918 ktrsyscall(code, nargs, args);
919 #endif
920
921 CTR4(KTR_SYSC, "syscall enter thread %p pid %d proc %s code %d", td,
922 td->td_proc->p_pid, td->td_proc->p_comm, code);
923 if ((callp->sy_narg & SYF_MPSAFE) == 0)
924 mtx_lock(&Giant);
925 locked = 1;
926 if (error == 0) {
927 td->td_retval[0] = 0;
928 td->td_retval[1] = 0;
929 STOPEVENT(p, S_SCE, (callp->sy_narg & SYF_ARGMASK));
930 PTRACESTOP_SC(p, td, S_PT_SCE);
931 error = (*callp->sy_call)(td, args);
932 }
933 switch (error) {
934 case 0:
935 #ifdef __ARMEB__
936 if ((insn & 0x000fffff) &&
937 (code != SYS_lseek)) {
938 /*
939 * 64-bit return, 32-bit syscall. Fixup byte order
940 */
941 frame->tf_r0 = 0;
942 frame->tf_r1 = td->td_retval[0];
943 } else {
944 frame->tf_r0 = td->td_retval[0];
945 frame->tf_r1 = td->td_retval[1];
946 }
947 #else
948 frame->tf_r0 = td->td_retval[0];
949 frame->tf_r1 = td->td_retval[1];
950 #endif
951 frame->tf_spsr &= ~PSR_C_bit; /* carry bit */
952 break;
953
954 case ERESTART:
955 /*
956 * Reconstruct the pc to point at the swi.
957 */
958 frame->tf_pc -= INSN_SIZE;
959 break;
960 case EJUSTRETURN:
961 /* nothing to do */
962 break;
963 default:
964 bad:
965 frame->tf_r0 = error;
966 frame->tf_spsr |= PSR_C_bit; /* carry bit */
967 break;
968 }
969 if (locked && (callp->sy_narg & SYF_MPSAFE) == 0)
970 mtx_unlock(&Giant);
971
972
973 userret(td, frame, sticks);
974 CTR4(KTR_SYSC, "syscall exit thread %p pid %d proc %s code %d", td,
975 td->td_proc->p_pid, td->td_proc->p_comm, code);
976
977 STOPEVENT(p, S_SCX, code);
978 PTRACESTOP_SC(p, td, S_PT_SCX);
979 #ifdef KTRACE
980 if (KTRPOINT(td, KTR_SYSRET))
981 ktrsysret(code, error, td->td_retval[0]);
982 #endif
983 mtx_assert(&sched_lock, MA_NOTOWNED);
984 mtx_assert(&Giant, MA_NOTOWNED);
985 }
986
987 void
988 swi_handler(trapframe_t *frame)
989 {
990 struct thread *td = curthread;
991 uint32_t insn;
992
993 td->td_frame = frame;
994
995 if (td->td_proc->p_flag & P_SA)
996 thread_user_enter(td);
997 /*
998 * Make sure the program counter is correctly aligned so we
999 * don't take an alignment fault trying to read the opcode.
1000 */
1001 if (__predict_false(((frame->tf_pc - INSN_SIZE) & 3) != 0)) {
1002 trapsignal(td, SIGILL, 0);
1003 userret(td, frame, td->td_sticks);
1004 return;
1005 }
1006 insn = *(u_int32_t *)(frame->tf_pc - INSN_SIZE);
1007 /*
1008 * Enable interrupts if they were enabled before the exception.
1009 * Since all syscalls *should* come from user mode it will always
1010 * be safe to enable them, but check anyway.
1011 */
1012 if (td->td_critnest == 0) {
1013 if (__predict_true(frame->tf_spsr & I32_bit) == 0)
1014 enable_interrupts(I32_bit);
1015 if (__predict_true(frame->tf_spsr & F32_bit) == 0)
1016 enable_interrupts(F32_bit);
1017 }
1018
1019 syscall(td, frame, insn);
1020 }
1021
Cache object: e133ed786fe003cb87850f6a909de0ad
|