sys/arm/arm/trap-v6.c
1 /*-
2 * Copyright 2014 Olivier Houchard <cognet@FreeBSD.org>
3 * Copyright 2014 Svatopluk Kraus <onwahe@gmail.com>
4 * Copyright 2014 Michal Meloun <meloun@miracle.cz>
5 * Copyright 2014 Andrew Turner <andrew@FreeBSD.org>
6 * All rights reserved.
7 *
8 * Redistribution and use in source and binary forms, with or without
9 * modification, are permitted provided that the following conditions
10 * are met:
11 * 1. Redistributions of source code must retain the above copyright
12 * notice, this list of conditions and the following disclaimer.
13 * 2. Redistributions in binary form must reproduce the above copyright
14 * notice, this list of conditions and the following disclaimer in the
15 * documentation and/or other materials provided with the distribution.
16 *
17 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
18 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
19 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
20 * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
21 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
22 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
23 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
24 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
25 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
26 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
27 * SUCH DAMAGE.
28 */
29
30 #include "opt_ktrace.h"
31
32 #include <sys/cdefs.h>
33 __FBSDID("$FreeBSD: releng/10.2/sys/arm/arm/trap-v6.c 278731 2015-02-13 23:32:03Z ian $");
34
35 #include <sys/param.h>
36 #include <sys/bus.h>
37 #include <sys/systm.h>
38 #include <sys/proc.h>
39 #include <sys/kernel.h>
40 #include <sys/lock.h>
41 #include <sys/mutex.h>
42 #include <sys/signalvar.h>
43 #include <sys/ktr.h>
44 #ifdef KTRACE
45 #include <sys/uio.h>
46 #include <sys/ktrace.h>
47 #endif
48
49 #include <vm/vm.h>
50 #include <vm/pmap.h>
51 #include <vm/vm_kern.h>
52 #include <vm/vm_map.h>
53 #include <vm/vm_extern.h>
54 #include <vm/vm_param.h>
55
56 #include <machine/cpu.h>
57 #include <machine/cpu-v6.h>
58 #include <machine/frame.h>
59 #include <machine/machdep.h>
60 #include <machine/pcb.h>
61 #include <machine/vmparam.h>
62
63 #ifdef KDB
64 #include <sys/kdb.h>
65 #include <machine/db_machdep.h>
66 #endif
67
68 extern char fusubailout[];
69
70 #ifdef DEBUG
71 int last_fault_code; /* For the benefit of pmap_fault_fixup() */
72 #endif
73
74 struct ksig {
75 int sig;
76 u_long code;
77 vm_offset_t addr;
78 };
79
80 typedef int abort_func_t(struct trapframe *, u_int, u_int, u_int, u_int,
81 struct thread *, struct ksig *);
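/*
 * An abort handler returns nonzero to request that the signal described by
 * its ksig argument be delivered to the thread, or zero when the abort has
 * been handled and execution can simply resume.
 */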
82
83 static abort_func_t abort_fatal;
84 static abort_func_t abort_align;
85 static abort_func_t abort_icache;
86
87 struct abort {
88 abort_func_t *func;
89 const char *desc;
90 };
91
92 /*
93 * How are the aborts handled?
94 *
95 * Undefined Code:
 96  *  - Always fatal as we do not know what it means.
 97  * Imprecise External Abort:
 98  *  - Always fatal, but can be handled somehow in the future.
 99  *    For now, ignored due to buggy PCIe hardware.
100  * Precise External Abort:
101  *  - Always fatal, but who knows what the future holds?
102  * Debug Event:
103  *  - Special handling.
104  * External Translation Abort (L1 & L2):
105  *  - Always fatal as something is screwed up in page tables or hardware.
106  * Domain Fault (L1 & L2):
107  *  - Always fatal as we do not play games with domains.
108  * Alignment Fault:
109  *  - Everything should be aligned in the kernel, including user-to-kernel
110  *    and kernel-to-user data copying, so we ignore pcb_onfault and it's
111  *    always fatal. We generate a signal in case of an abort from user mode.
112  * Instruction cache maintenance:
113  *  - According to the manual, this is a translation fault during a cache
114  *    maintenance operation, so it could be really complex in the SMP case
115  *    and fuzzy too for cache operations working on virtual addresses. For
116  *    now, we consider this abort fatal. In fact, no cache maintenance
117  *    should be performed on unmapped virtual addresses. As the cache
118  *    maintenance operations (except DMB, DSB, and Flush Prefetch Buffer)
119  *    are privileged, the abort is fatal for user mode as well for now.
120  *    (It is worth noting that cache maintenance on a virtual address fills the TLB.)
121  * Access Bit (L1 & L2):
122  *  - Fast hardware emulation for kernel and user mode.
123  * Translation Fault (L1 & L2):
124  *  - The standard fault mechanism is used, including vm_fault().
125  * Permission Fault (L1 & L2):
126  *  - Fast hardware emulation of modify bits; in other cases, the standard
127  *    fault mechanism is used, including vm_fault().
128 */
129
130 static const struct abort aborts[] = {
131 {abort_fatal, "Undefined Code (0x000)"},
132 {abort_align, "Alignment Fault"},
133 {abort_fatal, "Debug Event"},
134 {NULL, "Access Bit (L1)"},
135 {abort_icache, "Instruction cache maintenance"},
136 {NULL, "Translation Fault (L1)"},
137 {NULL, "Access Bit (L2)"},
138 {NULL, "Translation Fault (L2)"},
139
140 {abort_fatal, "External Abort"},
141 {abort_fatal, "Domain Fault (L1)"},
142 {abort_fatal, "Undefined Code (0x00A)"},
143 {abort_fatal, "Domain Fault (L2)"},
144 {abort_fatal, "External Translation Abort (L1)"},
145 {NULL, "Permission Fault (L1)"},
146 {abort_fatal, "External Translation Abort (L2)"},
147 {NULL, "Permission Fault (L2)"},
148
149 {abort_fatal, "TLB Conflict Abort"},
150 {abort_fatal, "Undefined Code (0x401)"},
151 {abort_fatal, "Undefined Code (0x402)"},
152 {abort_fatal, "Undefined Code (0x403)"},
153 {abort_fatal, "Undefined Code (0x404)"},
154 {abort_fatal, "Undefined Code (0x405)"},
155 {abort_fatal, "Asynchronous External Abort"},
156 {abort_fatal, "Undefined Code (0x407)"},
157
158 {abort_fatal, "Asynchronous Parity Error on Memory Access"},
159 {abort_fatal, "Parity Error on Memory Access"},
160 {abort_fatal, "Undefined Code (0x40A)"},
161 {abort_fatal, "Undefined Code (0x40B)"},
162 {abort_fatal, "Parity Error on Translation (L1)"},
163 {abort_fatal, "Undefined Code (0x40D)"},
164 {abort_fatal, "Parity Error on Translation (L2)"},
165 {abort_fatal, "Undefined Code (0x40F)"}
166 };
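/*
 * The table above is indexed by the fault status code extracted from the
 * FSR by FSR_TO_FAULT().  Entries with a NULL handler fall through to the
 * generic VM fault handling in abort_handler().
 */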
167
168
169 static __inline void
170 call_trapsignal(struct thread *td, int sig, int code, vm_offset_t addr)
171 {
172 ksiginfo_t ksi;
173
174 CTR4(KTR_TRAP, "%s: addr: %#x, sig: %d, code: %d",
175 __func__, addr, sig, code);
176
177 /*
178	 * TODO: It would be nice to know whether we are serving
179	 * a data or a prefetch abort.
180 */
181
182 ksiginfo_init_trap(&ksi);
183 ksi.ksi_signo = sig;
184 ksi.ksi_code = code;
185 ksi.ksi_addr = (void *)addr;
186 trapsignal(td, &ksi);
187 }
188
189 /*
190 * abort_imprecise() handles the following abort:
191 *
192 * FAULT_EA_IMPREC - Imprecise External Abort
193 *
194  * Imprecise means that we do not know where the abort happened,
195  * thus FAR is undefined. The abort should never fire, but hot
196  * plugging or accidental hardware failure can cause it.
197  * If the abort happens, it can even occur in a different (thread) context.
198 * Without any additional support, the abort is fatal, as we do not
199 * know what really happened.
200 *
201 * QQQ: Some additional functionality, like pcb_onfault but global,
202 * can be implemented. Imprecise handlers could be registered
203 * which tell us if the abort is caused by something they know
204 * about. They should return one of three codes like:
205 * FAULT_IS_MINE,
206 * FAULT_CAN_BE_MINE,
207 * FAULT_IS_NOT_MINE.
208  *      The handlers should be called until one of them returns
209  *      FAULT_IS_MINE or all of them have been called. If all handlers
210  *      return FAULT_IS_NOT_MINE, then the abort is fatal.
211 */
212 static __inline void
213 abort_imprecise(struct trapframe *tf, u_int fsr, u_int prefetch, u_int usermode)
214 {
215	/* XXXX We can get an imprecise abort as a result of an access
216	 * to not-present PCI/PCIe configuration space.
217 */
218 #if 0
219 goto out;
220 #endif
221 abort_fatal(tf, FAULT_EA_IMPREC, fsr, 0, prefetch, curthread, NULL);
222
223 /*
224	 * Returning from this function means that we ignore
225	 * the abort for a good reason. Note that an imprecise abort
226	 * can fire at any time, even in user mode.
227 */
228
229 #if 0
230 out:
231 if (usermode)
232 userret(curthread, tf);
233 #endif
234 }
235
236 /*
237 * abort_debug() handles the following abort:
238 *
239 * FAULT_DEBUG - Debug Event
240 *
241 */
242 static __inline void
243 abort_debug(struct trapframe *tf, u_int fsr, u_int prefetch, u_int usermode,
244 u_int far)
245 {
246 if (usermode) {
247 struct thread *td;
248
249 td = curthread;
250 call_trapsignal(td, SIGTRAP, TRAP_BRKPT, far);
251 userret(td, tf);
252 } else {
253 #ifdef KDB
254 kdb_trap(T_BREAKPOINT, 0, tf);
255 #else
256 printf("No debugger in kernel.\n");
257 #endif
258 }
259 }
260
261 /*
262 * Abort handler.
263 *
264  * FAR, FSR, and everything else that can be lost after enabling
265  * interrupts must be grabbed before the interrupts are enabled.
266  * Note that once interrupts are enabled, we could even migrate
267  * to another CPU ...
268 *
269 * TODO: move quick cases to ASM
270 */
271 void
272 abort_handler(struct trapframe *tf, int prefetch)
273 {
274 struct thread *td;
275 vm_offset_t far, va;
276 int idx, usermode;
277 uint32_t fsr;
278 struct ksig ksig;
279 struct proc *p;
280 struct pcb *pcb;
281 struct vm_map *map;
282 struct vmspace *vm;
283 vm_prot_t ftype;
284 int rv;
285 #ifdef INVARIANTS
286 void *onfault;
287 #endif
288 td = curthread;
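	/*
	 * Grab the fault status and fault address right away: for a prefetch
	 * abort the faulting address is the PC itself, for a data abort it
	 * comes from DFAR.  These values could be lost once interrupts are
	 * re-enabled further below.
	 */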
289	fsr = (prefetch) ? cp15_ifsr_get() : cp15_dfsr_get();
290 far = (prefetch) ? TRAPF_PC(tf) : cp15_dfar_get();
291
292 idx = FSR_TO_FAULT(fsr);
293 usermode = TRAPF_USERMODE(tf); /* Abort came from user mode? */
294 if (usermode)
295 td->td_frame = tf;
296
297 CTR4(KTR_TRAP, "abort_handler: fsr %#x (idx %u) far %#x prefetch %u",
298 fsr, idx, far, prefetch);
299
300 /*
301 * Firstly, handle aborts that are not directly related to mapping.
302 */
303 if (__predict_false(idx == FAULT_EA_IMPREC)) {
304 abort_imprecise(tf, fsr, prefetch, usermode);
305 return;
306 }
307
308 if (__predict_false(idx == FAULT_DEBUG)) {
309 abort_debug(tf, fsr, prefetch, usermode, far);
310 return;
311 }
312
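	/*
	 * When the kernel is built with ARM_NEW_PMAP, give the pmap layer the
	 * first chance to resolve the abort: a return value of 0 means it was
	 * handled, EFAULT means the address is invalid and a SIGSEGV is
	 * delivered, and anything else falls through to the standard VM fault
	 * handling below.
	 */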
313 #ifdef ARM_NEW_PMAP
314 rv = pmap_fault(PCPU_GET(curpmap), far, fsr, idx, usermode);
315 if (rv == 0) {
316 return;
317 } else if (rv == EFAULT) {
318
319 call_trapsignal(td, SIGSEGV, SEGV_MAPERR, far);
320 userret(td, tf);
321 return;
322 }
323 #endif
324 /*
325	 * Now that we have handled the imprecise and debug aborts, the rest
326	 * of the aborts should really be related to mapping.
327 *
328 */
329
330 PCPU_INC(cnt.v_trap);
331
332 #ifdef KDB
333 if (kdb_active) {
334 kdb_reenter();
335 goto out;
336 }
337 #endif
338 if (__predict_false((td->td_pflags & TDP_NOFAULTING) != 0)) {
339 /*
340 * Due to both processor errata and lazy TLB invalidation when
341 * access restrictions are removed from virtual pages, memory
342 * accesses that are allowed by the physical mapping layer may
343 * nonetheless cause one spurious page fault per virtual page.
344 * When the thread is executing a "no faulting" section that
345 * is bracketed by vm_fault_{disable,enable}_pagefaults(),
346 * every page fault is treated as a spurious page fault,
347 * unless it accesses the same virtual address as the most
348 * recent page fault within the same "no faulting" section.
349 */
350 if (td->td_md.md_spurflt_addr != far ||
351 (td->td_pflags & TDP_RESETSPUR) != 0) {
352 td->td_md.md_spurflt_addr = far;
353 td->td_pflags &= ~TDP_RESETSPUR;
354
355 tlb_flush_local(far & ~PAGE_MASK);
356 return;
357 }
358 } else {
359 /*
360 * If we get a page fault while in a critical section, then
361 * it is most likely a fatal kernel page fault. The kernel
362 * is already going to panic trying to get a sleep lock to
363 * do the VM lookup, so just consider it a fatal trap so the
364 * kernel can print out a useful trap message and even get
365 * to the debugger.
366 *
367 * If we get a page fault while holding a non-sleepable
368 * lock, then it is most likely a fatal kernel page fault.
369 * If WITNESS is enabled, then it's going to whine about
370 * bogus LORs with various VM locks, so just skip to the
371 * fatal trap handling directly.
372 */
373 if (td->td_critnest != 0 ||
374 WITNESS_CHECK(WARN_SLEEPOK | WARN_GIANTOK, NULL,
375 "Kernel page fault") != 0) {
376 abort_fatal(tf, idx, fsr, far, prefetch, td, &ksig);
377 return;
378 }
379 }
380
381 /* Re-enable interrupts if they were enabled previously. */
382 if (td->td_md.md_spinlock_count == 0) {
383		if (__predict_true((tf->tf_spsr & PSR_I) == 0))
384			enable_interrupts(PSR_I);
385		if (__predict_true((tf->tf_spsr & PSR_F) == 0))
386			enable_interrupts(PSR_F);
387 }
388
389 p = td->td_proc;
390 if (usermode) {
391 td->td_pticks = 0;
392 if (td->td_ucred != p->p_ucred)
393 cred_update_thread(td);
394 }
395
396 /* Invoke the appropriate handler, if necessary. */
397 if (__predict_false(aborts[idx].func != NULL)) {
398 if ((aborts[idx].func)(tf, idx, fsr, far, prefetch, td, &ksig))
399 goto do_trapsignal;
400 goto out;
401 }
402
403 /*
404 * At this point, we're dealing with one of the following aborts:
405 *
406 * FAULT_TRAN_xx - Translation
407 * FAULT_PERM_xx - Permission
408 *
409 * These are the main virtual memory-related faults signalled by
410 * the MMU.
411 */
412
413 /* fusubailout is used by [fs]uswintr to avoid page faulting */
414 pcb = td->td_pcb;
415 if (__predict_false(pcb->pcb_onfault == fusubailout)) {
416 tf->tf_r0 = EFAULT;
417 tf->tf_pc = (register_t)pcb->pcb_onfault;
418 return;
419 }
420
421 /*
422 * QQQ: ARM has a set of unprivileged load and store instructions
423	 *      (LDRT/LDRBT/STRT/STRBT ...) which are supposed to be used
424	 *      in modes other than user mode, and the OS should recognize
425	 *      their aborts and behave appropriately. However, there is no
426	 *      reasonable way to do that in general unless we restrict the
427	 *      handling somehow. One way is to limit the handling to aborts
428	 *      which come from undefined mode only.
429 *
430 * Anyhow, we do not use these instructions and do not implement
431 * any special handling for them.
432 */
433
434 va = trunc_page(far);
435 if (va >= KERNBASE) {
436 /*
437 * Don't allow user-mode faults in kernel address space.
438 */
439 if (usermode)
440 goto nogo;
441
442 map = kernel_map;
443 } else {
444 /*
445 * This is a fault on non-kernel virtual memory. If curproc
446 * is NULL or curproc->p_vmspace is NULL the fault is fatal.
447 */
448 vm = (p != NULL) ? p->p_vmspace : NULL;
449 if (vm == NULL)
450 goto nogo;
451
452 map = &vm->vm_map;
453 if (!usermode && (td->td_intr_nesting_level != 0 ||
454 pcb->pcb_onfault == NULL)) {
455 abort_fatal(tf, idx, fsr, far, prefetch, td, &ksig);
456 return;
457 }
458 }
459
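	/*
	 * Build the access type for vm_fault(): the WnR bit of the FSR
	 * distinguishes a faulting write from a read, and a prefetch abort
	 * additionally requires execute permission.
	 */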
460 ftype = (fsr & FSR_WNR) ? VM_PROT_WRITE : VM_PROT_READ;
461 if (prefetch)
462 ftype |= VM_PROT_EXECUTE;
463
464 #ifdef DEBUG
465 last_fault_code = fsr;
466 #endif
467
468 #ifndef ARM_NEW_PMAP
469 if (pmap_fault_fixup(vmspace_pmap(td->td_proc->p_vmspace), va, ftype,
470 usermode)) {
471 goto out;
472 }
473 #endif
474
475 #ifdef INVARIANTS
476 onfault = pcb->pcb_onfault;
477 pcb->pcb_onfault = NULL;
478 #endif
479 if (map != kernel_map) {
480 /*
481 * Keep swapout from messing with us during this
482 * critical time.
483 */
484 PROC_LOCK(p);
485 ++p->p_lock;
486 PROC_UNLOCK(p);
487
488 /* Fault in the user page: */
489 rv = vm_fault(map, va, ftype, VM_FAULT_NORMAL);
490
491 PROC_LOCK(p);
492 --p->p_lock;
493 PROC_UNLOCK(p);
494 } else {
495 /*
496 * Don't have to worry about process locking or stacks in the
497 * kernel.
498 */
499 rv = vm_fault(map, va, ftype, VM_FAULT_NORMAL);
500 }
501
502 #ifdef INVARIANTS
503 pcb->pcb_onfault = onfault;
504 #endif
505
506 if (__predict_true(rv == KERN_SUCCESS))
507 goto out;
508 nogo:
509 if (!usermode) {
510 if (td->td_intr_nesting_level == 0 &&
511 pcb->pcb_onfault != NULL) {
512 tf->tf_r0 = rv;
513 tf->tf_pc = (int)pcb->pcb_onfault;
514 return;
515 }
516 CTR2(KTR_TRAP, "%s: vm_fault() failed with %d", __func__, rv);
517 abort_fatal(tf, idx, fsr, far, prefetch, td, &ksig);
518 return;
519 }
520
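	/*
	 * Deliver a signal for the failed access: SIGBUS for a protection
	 * failure, SIGSEGV otherwise.
	 */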
521 ksig.sig = (rv == KERN_PROTECTION_FAILURE) ? SIGBUS : SIGSEGV;
522 ksig.code = 0;
523 ksig.addr = far;
524
525 do_trapsignal:
526 call_trapsignal(td, ksig.sig, ksig.code, ksig.addr);
527 out:
528 if (usermode)
529 userret(td, tf);
530 }
531
532 /*
533 * abort_fatal() handles the following data aborts:
534  *
535  *  FAULT_DEBUG - Debug Event
536  *  FAULT_ACCESS_xx - Access Bit
537 * FAULT_EA_PREC - Precise External Abort
538 * FAULT_DOMAIN_xx - Domain Fault
539 * FAULT_EA_TRAN_xx - External Translation Abort
540 * FAULT_EA_IMPREC - Imprecise External Abort
541 * + all undefined codes for ABORT
542 *
543 * We should never see these on a properly functioning system.
544 *
545 * This function is also called by the other handlers if they
546 * detect a fatal problem.
547 *
548  * Note: If 'td' is NULL, we assume we're dealing with a prefetch abort.
549 */
550 static int
551 abort_fatal(struct trapframe *tf, u_int idx, u_int fsr, u_int far, u_int prefetch,
552 struct thread *td, struct ksig *ksig)
553 {
554 u_int usermode;
555 const char *mode;
556 const char *rw_mode;
557
558 usermode = TRAPF_USERMODE(tf);
559 mode = usermode ? "user" : "kernel";
560 rw_mode = fsr & FSR_WNR ? "write" : "read";
561 disable_interrupts(PSR_I|PSR_F);
562
563 if (td != NULL) {
564 printf("Fatal %s mode data abort: '%s' on %s\n", mode,
565 aborts[idx].desc, rw_mode);
566 printf("trapframe: %p\nFSR=%08x, FAR=", tf, fsr);
567 if (idx != FAULT_EA_IMPREC)
568 printf("%08x, ", far);
569 else
570 printf("Invalid, ");
571 printf("spsr=%08x\n", tf->tf_spsr);
572 } else {
573 printf("Fatal %s mode prefetch abort at 0x%08x\n",
574 mode, tf->tf_pc);
575 printf("trapframe: %p, spsr=%08x\n", tf, tf->tf_spsr);
576 }
577
578 printf("r0 =%08x, r1 =%08x, r2 =%08x, r3 =%08x\n",
579 tf->tf_r0, tf->tf_r1, tf->tf_r2, tf->tf_r3);
580 printf("r4 =%08x, r5 =%08x, r6 =%08x, r7 =%08x\n",
581 tf->tf_r4, tf->tf_r5, tf->tf_r6, tf->tf_r7);
582 printf("r8 =%08x, r9 =%08x, r10=%08x, r11=%08x\n",
583 tf->tf_r8, tf->tf_r9, tf->tf_r10, tf->tf_r11);
584 printf("r12=%08x, ", tf->tf_r12);
585
586 if (usermode)
587 printf("usp=%08x, ulr=%08x",
588 tf->tf_usr_sp, tf->tf_usr_lr);
589 else
590 printf("ssp=%08x, slr=%08x",
591 tf->tf_svc_sp, tf->tf_svc_lr);
592 printf(", pc =%08x\n\n", tf->tf_pc);
593
594 #ifdef KDB
595 if (debugger_on_panic || kdb_active)
596 kdb_trap(fsr, 0, tf);
597 #endif
598 panic("Fatal abort");
599 /*NOTREACHED*/
600 }
601
602 /*
603 * abort_align() handles the following data abort:
604 *
605 * FAULT_ALIGN - Alignment fault
606 *
607  * Every memory access in the kernel should be correctly aligned, including
608  * user-to-kernel and kernel-to-user data copying, so we ignore pcb_onfault
609  * and it's always fatal. We generate a signal for aborts from user mode.
610 */
611 static int
612 abort_align(struct trapframe *tf, u_int idx, u_int fsr, u_int far, u_int prefetch,
613 struct thread *td, struct ksig *ksig)
614 {
615 u_int usermode;
616
617 usermode = TRAPF_USERMODE(tf);
618
619 /*
620	 * Alignment faults are always fatal if they occur in any mode but
621	 * user mode.
622	 *
623	 * XXX The old trap code handles pcb_onfault even for alignment traps.
624	 * Unfortunately, we don't know why, or whether this is needed.
624 */
625 if (!usermode) {
626		if (td != NULL && td->td_intr_nesting_level == 0 &&
627 td->td_pcb->pcb_onfault != NULL) {
628 printf("%s: Got alignment fault with pcb_onfault set"
629 ", please report this issue\n", __func__);
630			tf->tf_r0 = EFAULT;
631 tf->tf_pc = (int)td->td_pcb->pcb_onfault;
632 return (0);
633 }
634 abort_fatal(tf, idx, fsr, far, prefetch, td, ksig);
635 }
636 /* Deliver a bus error signal to the process */
637 ksig->code = 0;
638 ksig->sig = SIGBUS;
639 ksig->addr = far;
640 return (1);
641 }
642
643 /*
644 * abort_icache() handles the following data abort:
645 *
646 * FAULT_ICACHE - Instruction cache maintenance
647 *
648  * According to the manual, FAULT_ICACHE is a translation fault during a
649  * cache maintenance operation. In fact, no cache maintenance operation
650  * should be performed on unmapped virtual addresses. As the cache
651  * maintenance operations (except DMB, DSB, and Flush Prefetch Buffer) are
652  * privileged, the abort is considered fatal for now. However, cache
653  * maintenance operations on virtual addresses could be really complex and
654  * fuzzy in the SMP case, so maybe in the future the standard fault
655  * mechanism should be used here, including calling vm_fault().
656 */
657 static int
658 abort_icache(struct trapframe *tf, u_int idx, u_int fsr, u_int far, u_int prefetch,
659 struct thread *td, struct ksig *ksig)
660 {
661 abort_fatal(tf, idx, fsr, far, prefetch, td, ksig);
662	return (0);
663 }