FreeBSD/Linux Kernel Cross Reference
sys/arm/arm/trap-v6.c
1 /*-
2 * Copyright 2014 Olivier Houchard <cognet@FreeBSD.org>
3 * Copyright 2014 Svatopluk Kraus <onwahe@gmail.com>
4 * Copyright 2014 Michal Meloun <meloun@miracle.cz>
5 * Copyright 2014 Andrew Turner <andrew@FreeBSD.org>
6 * All rights reserved.
7 *
8 * Redistribution and use in source and binary forms, with or without
9 * modification, are permitted provided that the following conditions
10 * are met:
11 * 1. Redistributions of source code must retain the above copyright
12 * notice, this list of conditions and the following disclaimer.
13 * 2. Redistributions in binary form must reproduce the above copyright
14 * notice, this list of conditions and the following disclaimer in the
15 * documentation and/or other materials provided with the distribution.
16 *
17 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
18 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
19 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
20 * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
21 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
22 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
23 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
24 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
25 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
26 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
27 * SUCH DAMAGE.
28 */
29
30 #include "opt_ktrace.h"
31
32 #include <sys/cdefs.h>
33 __FBSDID("$FreeBSD$");
34
35 #include <sys/param.h>
36 #include <sys/bus.h>
37 #include <sys/systm.h>
38 #include <sys/proc.h>
39 #include <sys/kernel.h>
40 #include <sys/lock.h>
41 #include <sys/mutex.h>
42 #include <sys/signalvar.h>
43 #include <sys/ktr.h>
44 #include <sys/vmmeter.h>
45 #ifdef KTRACE
46 #include <sys/uio.h>
47 #include <sys/ktrace.h>
48 #endif
49
50 #include <vm/vm.h>
51 #include <vm/pmap.h>
52 #include <vm/vm_kern.h>
53 #include <vm/vm_map.h>
54 #include <vm/vm_extern.h>
55 #include <vm/vm_param.h>
56
57 #include <machine/cpu.h>
58 #include <machine/frame.h>
59 #include <machine/machdep.h>
60 #include <machine/pcb.h>
61
62 #ifdef KDB
63 #include <sys/kdb.h>
64 #include <machine/db_machdep.h>
65 #endif
66
67 #ifdef KDTRACE_HOOKS
68 #include <sys/dtrace_bsd.h>
69 #endif
70
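/*
 * Recovery label installed in pcb_onfault by cache maintenance code;
 * abort_handler() checks for it below and, instead of calling vm_fault(),
 * returns the faulting address to the bailout code in r0.  (The label
 * itself is defined outside this file.)
 */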
71 extern char cachebailout[];
72
73 #ifdef DEBUG
74 int last_fault_code; /* For the benefit of pmap_fault_fixup() */
75 #endif
76
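/*
 * Signal description collected by the individual abort handlers and
 * delivered to the faulting thread via call_trapsignal().
 */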
77 struct ksig {
78 int sig;
79 u_long code;
80 vm_offset_t addr;
81 };
82
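/*
 * Abort handlers return nonzero when the signal described in *ksig should
 * be delivered to the thread, and zero when the abort has been handled.
 */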
83 typedef int abort_func_t(struct trapframe *, u_int, u_int, u_int, u_int,
84 struct thread *, struct ksig *);
85
86 static abort_func_t abort_fatal;
87 static abort_func_t abort_align;
88 static abort_func_t abort_icache;
89
90 struct abort {
91 abort_func_t *func;
92 const char *desc;
93 };
94
95 /*
96 * How are the aborts handled?
97 *
98 * Undefined Code:
99 * - Always fatal as we do not know what it means.
100 * Imprecise External Abort:
101 * - Always fatal, but could be handled somehow in the future.
102 * For now, due to buggy PCIe hardware, it is ignored.
103 * Precise External Abort:
104 * - Always fatal for now, but that may change in the future.
105 * Debug Event:
106 * - Special handling.
107 * External Translation Abort (L1 & L2):
108 * - Always fatal as something is screwed up in the page tables or hardware.
109 * Domain Fault (L1 & L2):
110 * - Always fatal as we do not play games with domains.
111 * Alignment Fault:
112 * - Everything should be aligned in the kernel, with the exception of user
113 * to kernel and vice versa data copying, so if pcb_onfault is not set,
114 * it's fatal. We generate a signal in case of an abort from user mode.
115 * Instruction cache maintenance:
116 * - According to the manual, this is a translation fault during a cache
117 * maintenance operation. It could be really complex in the SMP case and
118 * fuzzy too for cache operations working on virtual addresses. For now,
119 * we consider this abort fatal. In fact, no cache maintenance should be
120 * performed on unmapped virtual addresses. As cache maintenance operations
121 * (except DMB, DSB, and Flush Prefetch Buffer) are privileged, the abort
122 * is fatal for user mode as well for now. (This is a good place to note
123 * that cache maintenance on a virtual address fills the TLB.)
124 * Access Bit (L1 & L2):
125 * - Fast hardware emulation for kernel and user mode.
126 * Translation Fault (L1 & L2):
127 * - The standard fault mechanism is used, including vm_fault().
128 * Permission Fault (L1 & L2):
129 * - Fast hardware emulation of modify bits; in other cases, the standard
130 * fault mechanism is used, including vm_fault().
131 */
132
133 static const struct abort aborts[] = {
134 {abort_fatal, "Undefined Code (0x000)"},
135 {abort_align, "Alignment Fault"},
136 {abort_fatal, "Debug Event"},
137 {NULL, "Access Bit (L1)"},
138 {NULL, "Instruction cache maintenance"},
139 {NULL, "Translation Fault (L1)"},
140 {NULL, "Access Bit (L2)"},
141 {NULL, "Translation Fault (L2)"},
142
143 {abort_fatal, "External Abort"},
144 {abort_fatal, "Domain Fault (L1)"},
145 {abort_fatal, "Undefined Code (0x00A)"},
146 {abort_fatal, "Domain Fault (L2)"},
147 {abort_fatal, "External Translation Abort (L1)"},
148 {NULL, "Permission Fault (L1)"},
149 {abort_fatal, "External Translation Abort (L2)"},
150 {NULL, "Permission Fault (L2)"},
151
152 {abort_fatal, "TLB Conflict Abort"},
153 {abort_fatal, "Undefined Code (0x401)"},
154 {abort_fatal, "Undefined Code (0x402)"},
155 {abort_fatal, "Undefined Code (0x403)"},
156 {abort_fatal, "Undefined Code (0x404)"},
157 {abort_fatal, "Undefined Code (0x405)"},
158 {abort_fatal, "Asynchronous External Abort"},
159 {abort_fatal, "Undefined Code (0x407)"},
160
161 {abort_fatal, "Asynchronous Parity Error on Memory Access"},
162 {abort_fatal, "Parity Error on Memory Access"},
163 {abort_fatal, "Undefined Code (0x40A)"},
164 {abort_fatal, "Undefined Code (0x40B)"},
165 {abort_fatal, "Parity Error on Translation (L1)"},
166 {abort_fatal, "Undefined Code (0x40D)"},
167 {abort_fatal, "Parity Error on Translation (L2)"},
168 {abort_fatal, "Undefined Code (0x40F)"}
169 };
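/*
 * The table above is indexed by FSR_TO_FAULT(fsr).  Entries with a NULL
 * handler are resolved by the common page-fault path in abort_handler().
 */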
170
171 static __inline void
172 call_trapsignal(struct thread *td, int sig, int code, vm_offset_t addr,
173 int trapno)
174 {
175 ksiginfo_t ksi;
176
177 CTR4(KTR_TRAP, "%s: addr: %#x, sig: %d, code: %d",
178 __func__, addr, sig, code);
179
180 /*
181 * TODO: it would be nice to know whether we are serving
182 * a data or a prefetch abort.
183 */
184
185 ksiginfo_init_trap(&ksi);
186 ksi.ksi_signo = sig;
187 ksi.ksi_code = code;
188 ksi.ksi_addr = (void *)addr;
189 ksi.ksi_trapno = trapno;
190 trapsignal(td, &ksi);
191 }
192
193 /*
194 * abort_imprecise() handles the following abort:
195 *
196 * FAULT_EA_IMPREC - Imprecise External Abort
197 *
198 * Imprecise means that we don't know where the abort happened,
199 * thus FAR is undefined. The abort should never fire, but hot
200 * plugging or accidental hardware failure can be the cause of it.
201 * If the abort happens, it can even occur in a different (thread) context.
202 * Without any additional support, the abort is fatal, as we do not
203 * know what really happened.
204 *
205 * QQQ: Some additional functionality, like pcb_onfault but global,
206 * could be implemented. Imprecise handlers could be registered
207 * which tell us if the abort is caused by something they know
208 * about. They should return one of three codes like:
209 * FAULT_IS_MINE,
210 * FAULT_CAN_BE_MINE,
211 * FAULT_IS_NOT_MINE.
212 * The handlers should be called until one of them returns
213 * FAULT_IS_MINE or all of them have been called. If all handlers
214 * return FAULT_IS_NOT_MINE, then the abort is fatal.
215 */
216 static __inline void
217 abort_imprecise(struct trapframe *tf, u_int fsr, u_int prefetch, bool usermode)
218 {
219
220 /*
221 * XXX - We can get an imprecise abort as a result of an access
222 * to non-present PCI/PCIe configuration space.
223 */
224 #if 0
225 goto out;
226 #endif
227 abort_fatal(tf, FAULT_EA_IMPREC, fsr, 0, prefetch, curthread, NULL);
228
229 /*
230 * Returning from this function means that we ignore
231 * the abort for a good reason. Note that an imprecise abort
232 * can fire at any time, even in user mode.
233 */
234
235 #if 0
236 out:
237 if (usermode)
238 userret(curthread, tf);
239 #endif
240 }
241
242 /*
243 * abort_debug() handles the following abort:
244 *
245 * FAULT_DEBUG - Debug Event
246 *
247 */
248 static __inline void
249 abort_debug(struct trapframe *tf, u_int fsr, u_int prefetch, bool usermode,
250 u_int far)
251 {
252
253 if (usermode) {
254 struct thread *td;
255
256 td = curthread;
257 call_trapsignal(td, SIGTRAP, TRAP_BRKPT, far, FAULT_DEBUG);
258 userret(td, tf);
259 } else {
260 #ifdef KDB
261 kdb_trap((prefetch) ? T_BREAKPOINT : T_WATCHPOINT, 0, tf);
262 #else
263 printf("No debugger in kernel.\n");
264 #endif
265 }
266 }
267
268 /*
269 * Abort handler.
270 *
271 * FAR, FSR, and everything that can be lost after enabling
272 * interrupts must be grabbed before the interrupts are
273 * enabled. Note that once interrupts are enabled, we
274 * could even migrate to another CPU ...
275 *
276 * TODO: move quick cases to ASM
277 */
278 void
279 abort_handler(struct trapframe *tf, int prefetch)
280 {
281 struct thread *td;
282 vm_offset_t far, va;
283 int idx, rv;
284 uint32_t fsr;
285 struct ksig ksig;
286 struct proc *p;
287 struct pcb *pcb;
288 struct vm_map *map;
289 struct vmspace *vm;
290 vm_prot_t ftype;
291 bool usermode;
292 int bp_harden, ucode;
293 #ifdef INVARIANTS
294 void *onfault;
295 #endif
296
297 VM_CNT_INC(v_trap);
298 td = curthread;
299
300 fsr = (prefetch) ? cp15_ifsr_get() : cp15_dfsr_get();
301 #if __ARM_ARCH >= 7
302 far = (prefetch) ? cp15_ifar_get() : cp15_dfar_get();
303 #else
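	/*
	 * Older (pre-ARMv7) cores may not provide a usable IFAR, so the
	 * aborting PC is used as the prefetch-abort address instead.
	 */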
304 far = (prefetch) ? TRAPF_PC(tf) : cp15_dfar_get();
305 #endif
306
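	/*
	 * FSR_TO_FAULT() folds the FSR fault-status field (bits [3:0] plus
	 * bit 10 on ARMv7) into an index into the aborts[] table, roughly:
	 *
	 *	idx = (fsr & 0xf) | ((fsr >> 6) & 0x10);
	 *
	 * (Illustrative sketch only; the real macro lives in a machine header.)
	 */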
307 idx = FSR_TO_FAULT(fsr);
308 usermode = TRAPF_USERMODE(tf); /* Abort came from user mode? */
309
310 /*
311 * Apply BP hardening by flushing the branch prediction cache
312 * for prefaults on kernel addresses.
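 * (This is the Spectre-style branch-target-injection hardening; the choice
 * between BPIALL and ICIALLU comes from the per-CPU bp_harden_kind.)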
313 */
314 if (__predict_false(prefetch && far > VM_MAXUSER_ADDRESS &&
315 (idx == FAULT_TRAN_L2 || idx == FAULT_PERM_L2))) {
316 bp_harden = PCPU_GET(bp_harden_kind);
317 if (bp_harden == PCPU_BP_HARDEN_KIND_BPIALL)
318 _CP15_BPIALL();
319 else if (bp_harden == PCPU_BP_HARDEN_KIND_ICIALLU)
320 _CP15_ICIALLU();
321 }
322
323 if (usermode)
324 td->td_frame = tf;
325
326 CTR6(KTR_TRAP, "%s: fsr %#x (idx %u) far %#x prefetch %u usermode %d",
327 __func__, fsr, idx, far, prefetch, usermode);
328
329 /*
330 * Firstly, handle aborts that are not directly related to mapping.
331 */
332 if (__predict_false(idx == FAULT_EA_IMPREC)) {
333 abort_imprecise(tf, fsr, prefetch, usermode);
334 return;
335 }
336
337 if (__predict_false(idx == FAULT_DEBUG)) {
338 abort_debug(tf, fsr, prefetch, usermode, far);
339 return;
340 }
341
342 /*
343 * ARM has a set of unprivileged load and store instructions
344 * (LDRT/LDRBT/STRT/STRBT ...) which are supposed to be used in modes
345 * other than user mode, and the OS should recognize their aborts and
346 * behave appropriately. However, there is no way to do that reasonably
347 * in the general case unless we restrict the handling somehow.
348 *
349 * For now, these instructions are used only in copyin()/copyout()
350 * like functions where user-mode buffers are checked in advance to
351 * ensure they are not from KVA space. Thus, no action is needed here.
352 */
353
354 /*
355 * (1) Handle access and R/W hardware emulation aborts.
356 * (2) Check that the abort is not on pmap-essential address ranges.
357 * There is no way to fix such an abort, so we don't even try.
358 */
359 rv = pmap_fault(PCPU_GET(curpmap), far, fsr, idx, usermode);
360 if (rv == KERN_SUCCESS)
361 return;
362 #ifdef KDB
363 if (kdb_active) {
364 kdb_reenter();
365 goto out;
366 }
367 #endif
368 if (rv == KERN_INVALID_ADDRESS)
369 goto nogo;
370
371 if (__predict_false((td->td_pflags & TDP_NOFAULTING) != 0)) {
372 /*
373 * Due to both processor errata and lazy TLB invalidation when
374 * access restrictions are removed from virtual pages, memory
375 * accesses that are allowed by the physical mapping layer may
376 * nonetheless cause one spurious page fault per virtual page.
377 * When the thread is executing a "no faulting" section that
378 * is bracketed by vm_fault_{disable,enable}_pagefaults(),
379 * every page fault is treated as a spurious page fault,
380 * unless it accesses the same virtual address as the most
381 * recent page fault within the same "no faulting" section.
382 */
383 if (td->td_md.md_spurflt_addr != far ||
384 (td->td_pflags & TDP_RESETSPUR) != 0) {
385 td->td_md.md_spurflt_addr = far;
386 td->td_pflags &= ~TDP_RESETSPUR;
387
388 tlb_flush_local(far & ~PAGE_MASK);
389 return;
390 }
391 } else {
392 /*
393 * If we get a page fault while in a critical section, then
394 * it is most likely a fatal kernel page fault. The kernel
395 * is already going to panic trying to get a sleep lock to
396 * do the VM lookup, so just consider it a fatal trap so the
397 * kernel can print out a useful trap message and even get
398 * to the debugger.
399 *
400 * If we get a page fault while holding a non-sleepable
401 * lock, then it is most likely a fatal kernel page fault.
402 * If WITNESS is enabled, then it's going to whine about
403 * bogus LORs with various VM locks, so just skip to the
404 * fatal trap handling directly.
405 */
406 if (td->td_critnest != 0 ||
407 WITNESS_CHECK(WARN_SLEEPOK | WARN_GIANTOK, NULL,
408 "Kernel page fault") != 0) {
409 abort_fatal(tf, idx, fsr, far, prefetch, td, &ksig);
410 return;
411 }
412 }
413
414 /* Re-enable interrupts if they were enabled previously. */
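	/*
	 * PSR_I and PSR_F are the IRQ and FIQ mask bits in the saved SPSR;
	 * a clear bit means that interrupt source was enabled before the abort.
	 */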
415 if (td->td_md.md_spinlock_count == 0) {
416 if (__predict_true((tf->tf_spsr & PSR_I) == 0))
417 enable_interrupts(PSR_I);
418 if (__predict_true((tf->tf_spsr & PSR_F) == 0))
419 enable_interrupts(PSR_F);
420 }
421
422 p = td->td_proc;
423 if (usermode) {
424 td->td_pticks = 0;
425 if (td->td_cowgen != p->p_cowgen)
426 thread_cow_update(td);
427 }
428
429 /* Invoke the appropriate handler, if necessary. */
430 if (__predict_false(aborts[idx].func != NULL)) {
431 if ((aborts[idx].func)(tf, idx, fsr, far, prefetch, td, &ksig))
432 goto do_trapsignal;
433 goto out;
434 }
435
436 /*
437 * At this point, we're dealing with one of the following aborts:
438 *
439 * FAULT_ICACHE - I-cache maintenance
440 * FAULT_TRAN_xx - Translation
441 * FAULT_PERM_xx - Permission
442 */
443
444 /*
445 * Don't pass faulting cache operation to vm_fault(). We don't want
446 * to handle all vm stuff at this moment.
447 */
448 pcb = td->td_pcb;
449 if (__predict_false(pcb->pcb_onfault == cachebailout)) {
450 tf->tf_r0 = far; /* return failing address */
451 tf->tf_pc = (register_t)pcb->pcb_onfault;
452 return;
453 }
454
455 /* Handle remaining I-cache aborts. */
456 if (idx == FAULT_ICACHE) {
457 if (abort_icache(tf, idx, fsr, far, prefetch, td, &ksig))
458 goto do_trapsignal;
459 goto out;
460 }
461
462 va = trunc_page(far);
463 if (va >= KERNBASE) {
464 /*
465 * Don't allow user-mode faults in kernel address space.
466 */
467 if (usermode) {
468 ksig.sig = SIGSEGV;
469 ksig.code = SEGV_ACCERR;
470 goto nogo;
471 }
472
473 map = kernel_map;
474 } else {
475 /*
476 * This is a fault on non-kernel virtual memory. If curproc
477 * is NULL or curproc->p_vmspace is NULL, the fault is fatal.
478 */
479 vm = (p != NULL) ? p->p_vmspace : NULL;
480 if (vm == NULL) {
481 ksig.sig = SIGSEGV;
482 ksig.code = 0;
483 goto nogo;
484 }
485
486 map = &vm->vm_map;
487 if (!usermode && (td->td_intr_nesting_level != 0 ||
488 pcb->pcb_onfault == NULL)) {
489 abort_fatal(tf, idx, fsr, far, prefetch, td, &ksig);
490 return;
491 }
492 }
493
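	/*
	 * Build the VM fault type: FSR_WNR is the DFSR "write, not read" bit;
	 * prefetch (instruction) aborts also request execute permission.
	 */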
494 ftype = (fsr & FSR_WNR) ? VM_PROT_WRITE : VM_PROT_READ;
495 if (prefetch)
496 ftype |= VM_PROT_EXECUTE;
497
498 #ifdef DEBUG
499 last_fault_code = fsr;
500 #endif
501
502 #ifdef INVARIANTS
503 onfault = pcb->pcb_onfault;
504 pcb->pcb_onfault = NULL;
505 #endif
506
507 /* Fault in the page. */
508 rv = vm_fault_trap(map, va, ftype, VM_FAULT_NORMAL, &ksig.sig,
509 &ucode);
510 ksig.code = ucode;
511
512 #ifdef INVARIANTS
513 pcb->pcb_onfault = onfault;
514 #endif
515
516 if (__predict_true(rv == KERN_SUCCESS))
517 goto out;
518 nogo:
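	/*
	 * The fault could not be resolved. For kernel-mode faults with a
	 * registered pcb_onfault handler (copyin()/copyout() style recovery),
	 * resume at the handler with an error code in r0; otherwise the fault
	 * is fatal for the kernel, and becomes a signal for user mode.
	 */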
519 if (!usermode) {
520 if (td->td_intr_nesting_level == 0 &&
521 pcb->pcb_onfault != NULL) {
522 tf->tf_r0 = rv;
523 tf->tf_pc = (int)pcb->pcb_onfault;
524 return;
525 }
526 CTR2(KTR_TRAP, "%s: vm_fault() failed with %d", __func__, rv);
527 abort_fatal(tf, idx, fsr, far, prefetch, td, &ksig);
528 return;
529 }
530
531 ksig.addr = far;
532
533 do_trapsignal:
534 call_trapsignal(td, ksig.sig, ksig.code, ksig.addr, idx);
535 out:
536 if (usermode)
537 userret(td, tf);
538 }
539
540 /*
541 * abort_fatal() handles the following data aborts:
542 *
543 * FAULT_DEBUG - Debug Event
544 * FAULT_ACCESS_xx - Access Bit
545 * FAULT_EA_PREC - Precise External Abort
546 * FAULT_DOMAIN_xx - Domain Fault
547 * FAULT_EA_TRAN_xx - External Translation Abort
548 * FAULT_EA_IMPREC - Imprecise External Abort
549 * + all undefined codes for ABORT
550 *
551 * We should never see these on a properly functioning system.
552 *
553 * This function is also called by the other handlers if they
554 * detect a fatal problem.
555 *
556 * Note: If 'td' is NULL, we assume we're dealing with a prefetch abort.
557 */
558 static int
559 abort_fatal(struct trapframe *tf, u_int idx, u_int fsr, u_int far,
560 u_int prefetch, struct thread *td, struct ksig *ksig)
561 {
562 bool usermode;
563 const char *mode;
564 const char *rw_mode;
565 #ifdef KDB
566 bool handled;
567 #endif
568
569 usermode = TRAPF_USERMODE(tf);
570 #ifdef KDTRACE_HOOKS
571 if (!usermode) {
572 if (dtrace_trap_func != NULL && (*dtrace_trap_func)(tf, far))
573 return (0);
574 }
575 #endif
576
577 mode = usermode ? "user" : "kernel";
578 rw_mode = fsr & FSR_WNR ? "write" : "read";
579 disable_interrupts(PSR_I|PSR_F);
580
581 if (td != NULL) {
582 printf("Fatal %s mode data abort: '%s' on %s\n", mode,
583 aborts[idx].desc, rw_mode);
584 printf("trapframe: %p\nFSR=%08x, FAR=", tf, fsr);
585 if (idx != FAULT_EA_IMPREC)
586 printf("%08x, ", far);
587 else
588 printf("Invalid, ");
589 printf("spsr=%08x\n", tf->tf_spsr);
590 } else {
591 printf("Fatal %s mode prefetch abort at 0x%08x\n",
592 mode, tf->tf_pc);
593 printf("trapframe: %p, spsr=%08x\n", tf, tf->tf_spsr);
594 }
595
596 printf("r0 =%08x, r1 =%08x, r2 =%08x, r3 =%08x\n",
597 tf->tf_r0, tf->tf_r1, tf->tf_r2, tf->tf_r3);
598 printf("r4 =%08x, r5 =%08x, r6 =%08x, r7 =%08x\n",
599 tf->tf_r4, tf->tf_r5, tf->tf_r6, tf->tf_r7);
600 printf("r8 =%08x, r9 =%08x, r10=%08x, r11=%08x\n",
601 tf->tf_r8, tf->tf_r9, tf->tf_r10, tf->tf_r11);
602 printf("r12=%08x, ", tf->tf_r12);
603
604 if (usermode)
605 printf("usp=%08x, ulr=%08x",
606 tf->tf_usr_sp, tf->tf_usr_lr);
607 else
608 printf("ssp=%08x, slr=%08x",
609 tf->tf_svc_sp, tf->tf_svc_lr);
610 printf(", pc =%08x\n\n", tf->tf_pc);
611
612 #ifdef KDB
613 if (debugger_on_trap) {
614 kdb_why = KDB_WHY_TRAP;
615 handled = kdb_trap(fsr, 0, tf);
616 kdb_why = KDB_WHY_UNSET;
617 if (handled)
618 return (0);
619 }
620 #endif
621 panic("Fatal abort");
622 /*NOTREACHED*/
623 }
624
625 /*
626 * abort_align() handles the following data abort:
627 *
628 * FAULT_ALIGN - Alignment fault
629 *
630 * Everything should be aligned in kernel with exception of user to kernel
631 * and vice versa data copying, so if pcb_onfault is not set, it's fatal.
632 * We generate signal in case of abort from user mode.
633 */
634 static int
635 abort_align(struct trapframe *tf, u_int idx, u_int fsr, u_int far,
636 u_int prefetch, struct thread *td, struct ksig *ksig)
637 {
638 bool usermode;
639
640 usermode = TRAPF_USERMODE(tf);
641 if (!usermode) {
642 if (td != NULL && td->td_intr_nesting_level == 0 &&
643 td->td_pcb->pcb_onfault != NULL) {
644 tf->tf_r0 = EFAULT;
645 tf->tf_pc = (int)td->td_pcb->pcb_onfault;
646 return (0);
647 }
648 abort_fatal(tf, idx, fsr, far, prefetch, td, ksig);
649 }
650 /* Deliver a bus error signal to the process */
651 ksig->code = BUS_ADRALN;
652 ksig->sig = SIGBUS;
653 ksig->addr = far;
654 return (1);
655 }
656
657 /*
658 * abort_icache() handles the following data abort:
659 *
660 * FAULT_ICACHE - Instruction cache maintenance
661 *
662 * According to the manual, FAULT_ICACHE is a translation fault during a
663 * cache maintenance operation. In fact, no cache maintenance operation
664 * should be performed on unmapped virtual addresses. As cache maintenance
665 * operations (except DMB, DSB, and Flush Prefetch Buffer) are privileged,
666 * the abort is considered fatal for now. However, cache maintenance
667 * operations on virtual addresses can be really complex and fuzzy in the
668 * SMP case, so maybe in the future the standard fault mechanism should be
669 * used here, including a vm_fault() call.
670 */
671 static int
672 abort_icache(struct trapframe *tf, u_int idx, u_int fsr, u_int far,
673 u_int prefetch, struct thread *td, struct ksig *ksig)
674 {
675
676 abort_fatal(tf, idx, fsr, far, prefetch, td, ksig);
677 return (0);
678 }