/*-
 * Copyright (c) 1989, 1990 William F. Jolitz.
 * Copyright (c) 1990 The Regents of the University of California.
 * Copyright (c) 2007 The FreeBSD Foundation
 * All rights reserved.
 *
 * Portions of this software were developed by A. Joseph Koshy under
 * sponsorship from the FreeBSD Foundation and Google, Inc.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 4. Neither the name of the University nor the names of its contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 * $FreeBSD$
 */

#include "opt_atpic.h"
#include "opt_compat.h"
#include "opt_kdtrace.h"
#include "opt_hwpmc_hooks.h"

#include <machine/asmacros.h>
#include <machine/psl.h>
#include <machine/trap.h>

#include "assym.s"

#ifdef KDTRACE_HOOKS
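/*
 * Hooks for the DTrace invalid-opcode handler.  dtrace_invop_jump_addr
 * is filled in at runtime by the DTrace module; dtrace_invop_calltrap_addr
 * is set below so that the hooked code can jump back to 'calltrap' when
 * an int3 turns out not to be a DTrace probe.
 */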
	.bss
	.globl	dtrace_invop_jump_addr
	.align	8
	.type	dtrace_invop_jump_addr, @object
	.size	dtrace_invop_jump_addr, 8
dtrace_invop_jump_addr:
	.zero	8
	.globl	dtrace_invop_calltrap_addr
	.align	8
	.type	dtrace_invop_calltrap_addr, @object
	.size	dtrace_invop_calltrap_addr, 8
dtrace_invop_calltrap_addr:
	.zero	8
#endif
	.text
#ifdef HWPMC_HOOKS
ENTRY(start_exceptions)
#endif


/*****************************************************************************/
/* Trap handling                                                             */
/*****************************************************************************/
/*
 * Trap and fault vector routines.
 *
 * All traps are 'interrupt gates', SDT_SYSIGT.  An interrupt gate pushes
 * state on the stack but also disables interrupts.  This is important for
 * our use of the swapgs instruction: we cannot be interrupted until the
 * GS.base value is correct.  For most traps, we then automatically enable
 * interrupts if the interrupted context had them enabled.  This is
 * equivalent to the i386 port's use of SDT_SYS386TGT.
 *
 * The cpu will push a certain amount of state onto the kernel stack for
 * the current process; see amd64/include/frame.h.  The current RFLAGS
 * (which includes the interrupt-disable state prior to the trap), the
 * code segment register, and the return instruction pointer are pushed
 * by the cpu.  The cpu will also push an 'error' code for certain traps;
 * we push a dummy error code for the traps where the cpu doesn't supply
 * one, in order to maintain a consistent frame.  We also push a
 * contrived 'trap number'.
 *
 * The cpu does not push the general registers; we must do that, and we
 * must restore them prior to the 'iretq'.  The cpu adjusts the %cs and
 * %ss segment registers, but does not mess with %ds, %es, or %fs.  Thus we
 * must load them with appropriate values for supervisor mode operation.
 */
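/*
 * A rough sketch of the resulting frame (see amd64/include/frame.h for
 * the authoritative layout):
 *
 *	tf_rdi ... tf_r15	general registers, saved by the code below
 *	tf_trapno, tf_addr	contrived values, stored by the code below
 *	tf_err			cpu error code, or a dummy stored below
 *	tf_rip, tf_cs, tf_rflags, tf_rsp, tf_ss		pushed by the cpu
 *
 * Hence 'subq $TF_RIP,%rsp' (when the cpu pushed no error code) or
 * 'subq $TF_ERR,%rsp' (when it did) lowers %rsp to the base of the
 * frame, after which TF_xxx(%rsp) addresses each field.
 */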

MCOUNT_LABEL(user)
MCOUNT_LABEL(btrap)

/* Traps for which we leave interrupts disabled. */
#define	TRAP_NOEN(a)	\
	subq $TF_RIP,%rsp; \
	movq $(a),TF_TRAPNO(%rsp) ; \
	movq $0,TF_ADDR(%rsp) ; \
	movq $0,TF_ERR(%rsp) ; \
	jmp alltraps_noen
IDTVEC(dbg)
	TRAP_NOEN(T_TRCTRAP)
IDTVEC(bpt)
	TRAP_NOEN(T_BPTFLT)

/* Regular traps; the cpu does not supply tf_err for these. */
#define	TRAP(a)	\
	subq $TF_RIP,%rsp; \
	movq $(a),TF_TRAPNO(%rsp) ; \
	movq $0,TF_ADDR(%rsp) ; \
	movq $0,TF_ERR(%rsp) ; \
	jmp alltraps
IDTVEC(div)
	TRAP(T_DIVIDE)
IDTVEC(ofl)
	TRAP(T_OFLOW)
IDTVEC(bnd)
	TRAP(T_BOUND)
IDTVEC(ill)
	TRAP(T_PRIVINFLT)
IDTVEC(dna)
	TRAP(T_DNA)
IDTVEC(fpusegm)
	TRAP(T_FPOPFLT)
IDTVEC(mchk)
	TRAP(T_MCHK)
IDTVEC(rsvd)
	TRAP(T_RESERVED)
IDTVEC(fpu)
	TRAP(T_ARITHTRAP)
IDTVEC(xmm)
	TRAP(T_XMMFLT)

/* This group of traps has tf_err already pushed by the cpu. */
#define	TRAP_ERR(a)	\
	subq $TF_ERR,%rsp; \
	movq $(a),TF_TRAPNO(%rsp) ; \
	movq $0,TF_ADDR(%rsp) ; \
	jmp alltraps
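/*
 * Note the smaller adjustment here: the cpu has already pushed tf_err,
 * so TRAP_ERR() lowers %rsp by TF_ERR only, and must not overwrite the
 * real error code with a dummy.
 */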
IDTVEC(tss)
	TRAP_ERR(T_TSSFLT)
IDTVEC(missing)
	TRAP_ERR(T_SEGNPFLT)
IDTVEC(stk)
	TRAP_ERR(T_STKFLT)
IDTVEC(align)
	TRAP_ERR(T_ALIGNFLT)

/*
 * alltraps entry point.  Use swapgs if this is the first entry into the
 * kernel from userland.  Reenable interrupts if they were enabled before
 * the trap.  This approximates SDT_SYS386TGT on the i386 port.
 */

	SUPERALIGN_TEXT
	.globl	alltraps
	.type	alltraps,@function
alltraps:
	testb	$SEL_RPL_MASK,TF_CS(%rsp) /* Did we come from kernel? */
	jz	alltraps_testi		/* already running with kernel GS.base */
	swapgs
alltraps_testi:
	testl	$PSL_I,TF_RFLAGS(%rsp)
	jz	alltraps_pushregs
	sti
alltraps_pushregs:
	movq	%rdi,TF_RDI(%rsp)
alltraps_pushregs_no_rdi:
	movq	%rsi,TF_RSI(%rsp)
	movq	%rdx,TF_RDX(%rsp)
	movq	%rcx,TF_RCX(%rsp)
	movq	%r8,TF_R8(%rsp)
	movq	%r9,TF_R9(%rsp)
	movq	%rax,TF_RAX(%rsp)
	movq	%rbx,TF_RBX(%rsp)
	movq	%rbp,TF_RBP(%rsp)
	movq	%r10,TF_R10(%rsp)
	movq	%r11,TF_R11(%rsp)
	movq	%r12,TF_R12(%rsp)
	movq	%r13,TF_R13(%rsp)
	movq	%r14,TF_R14(%rsp)
	movq	%r15,TF_R15(%rsp)
	FAKE_MCOUNT(TF_RIP(%rsp))
#ifdef KDTRACE_HOOKS
	/*
	 * DTrace Function Boundary Trace (fbt) and Statically Defined
	 * Trace (sdt) probes are triggered by int3 (0xcc), which causes
	 * the #BP (T_BPTFLT) breakpoint interrupt.  For all other trap
	 * types, just handle them in the usual way.
	 */
	cmpq	$T_BPTFLT,TF_TRAPNO(%rsp)
	jne	calltrap

	/* If no DTrace hook is registered, take the normal path. */
	cmpq	$0,dtrace_invop_jump_addr
	je	calltrap

	/*
	 * Store the address DTrace will jump back to if the breakpoint
	 * turns out not to have been caused by DTrace at all.
	 */
	movq	$calltrap, dtrace_invop_calltrap_addr(%rip)

	/* Jump to the code hooked in by DTrace. */
	movq	dtrace_invop_jump_addr, %rax
	jmpq	*dtrace_invop_jump_addr
#endif
	.globl	calltrap
	.type	calltrap,@function
calltrap:
	movq	%rsp, %rdi
	call	trap
	MEXITCOUNT
	jmp	doreti			/* Handle any pending ASTs */

/*
 * alltraps_noen entry point.  Unlike alltraps above, we want to
 * leave interrupts disabled.  This corresponds to SDT_SYS386IGT
 * on the i386 port.
 */
	SUPERALIGN_TEXT
	.globl	alltraps_noen
	.type	alltraps_noen,@function
alltraps_noen:
	testb	$SEL_RPL_MASK,TF_CS(%rsp) /* Did we come from kernel? */
	jz	alltraps_pushregs	/* already running with kernel GS.base */
	swapgs
	jmp	alltraps_pushregs

IDTVEC(dblfault)
	subq	$TF_ERR,%rsp
	movq	$T_DOUBLEFLT,TF_TRAPNO(%rsp)
	movq	$0,TF_ADDR(%rsp)
	movq	$0,TF_ERR(%rsp)
	movq	%rdi,TF_RDI(%rsp)
	movq	%rsi,TF_RSI(%rsp)
	movq	%rdx,TF_RDX(%rsp)
	movq	%rcx,TF_RCX(%rsp)
	movq	%r8,TF_R8(%rsp)
	movq	%r9,TF_R9(%rsp)
	movq	%rax,TF_RAX(%rsp)
	movq	%rbx,TF_RBX(%rsp)
	movq	%rbp,TF_RBP(%rsp)
	movq	%r10,TF_R10(%rsp)
	movq	%r11,TF_R11(%rsp)
	movq	%r12,TF_R12(%rsp)
	movq	%r13,TF_R13(%rsp)
	movq	%r14,TF_R14(%rsp)
	movq	%r15,TF_R15(%rsp)
	testb	$SEL_RPL_MASK,TF_CS(%rsp) /* Did we come from kernel? */
	jz	1f			/* already running with kernel GS.base */
	swapgs
1:	movq	%rsp, %rdi
	call	dblfault_handler
2:	hlt				/* dblfault_handler() should not return; park the cpu */
	jmp	2b

IDTVEC(page)
	subq	$TF_ERR,%rsp
	movq	$T_PAGEFLT,TF_TRAPNO(%rsp)
	testb	$SEL_RPL_MASK,TF_CS(%rsp) /* Did we come from kernel? */
	jz	1f			/* already running with kernel GS.base */
	swapgs
1:
	movq	%rdi,TF_RDI(%rsp)	/* free up a GP register */
	movq	%cr2,%rdi		/* preserve %cr2 before enabling */
	movq	%rdi,TF_ADDR(%rsp)	/* interrupts; a nested fault would clobber it */
	testl	$PSL_I,TF_RFLAGS(%rsp)
	jz	alltraps_pushregs_no_rdi
	sti
	jmp	alltraps_pushregs_no_rdi

/*
 * We have to special-case this one.  If we get a trap in doreti() at
 * the iretq stage, we'll reenter with the wrong gs state.  We have to
 * do the swapgs in this case even though we are coming from the kernel.
 * XXX linux has a trap handler for their equivalent of load_gs().
 */
IDTVEC(prot)
	subq	$TF_ERR,%rsp
	movq	$T_PROTFLT,TF_TRAPNO(%rsp)
	movq	$0,TF_ADDR(%rsp)
	movq	%rdi,TF_RDI(%rsp)	/* free up a GP register */
	leaq	doreti_iret(%rip),%rdi
	cmpq	%rdi,TF_RIP(%rsp)
	je	2f			/* kernel but with user gsbase!! */
	testb	$SEL_RPL_MASK,TF_CS(%rsp) /* Did we come from kernel? */
	jz	1f			/* already running with kernel GS.base */
2:
	swapgs
1:
	testl	$PSL_I,TF_RFLAGS(%rsp)
	jz	alltraps_pushregs_no_rdi
	sti
	jmp	alltraps_pushregs_no_rdi

/*
 * Fast syscall entry point.  We enter here with just our new %cs/%ss set,
 * and the new privilege level.  We are still running on the old user stack
 * pointer.  We have to juggle a few things around to find our stack etc.
 * swapgs gives us access to our PCPU space only.
 */
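/*
 * For reference, userland's side of this contract, as a sketch (the
 * real stubs live in libc; SYS_write is just an example number):
 *
 *	movq	$SYS_write,%rax		# syscall number
 *	syscall				# cpu: %rcx <- %rip, %r11 <- %rflags
 *	jb	cerror			# FreeBSD sets carry on error
 *
 * Because the cpu clobbers %rcx, the kernel syscall ABI passes the
 * fourth argument in %r10; that is why %r10 is stored into tf_rcx below.
 */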
IDTVEC(fast_syscall)
	swapgs
	movq	%rsp,PCPU(SCRATCH_RSP)
	movq	PCPU(RSP0),%rsp
	/* Now emulate a trapframe. Make the 8 byte alignment odd for call. */
	subq	$TF_SIZE,%rsp
	/* defer TF_RSP till we have a spare register */
	movq	%r11,TF_RFLAGS(%rsp)
	movq	%rcx,TF_RIP(%rsp)	/* %rcx original value is in %r10 */
	movq	PCPU(SCRATCH_RSP),%r11	/* %r11 already saved */
	movq	%r11,TF_RSP(%rsp)	/* user stack pointer */
	sti
	movq	$KUDSEL,TF_SS(%rsp)
	movq	$KUCSEL,TF_CS(%rsp)
	movq	$2,TF_ERR(%rsp)
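	/*
	 * The 2 just stored in tf_err is, as far as we can tell, the
	 * length in bytes of the 'syscall' instruction, so that a
	 * restarted syscall can be resumed by backing %rip up by tf_err.
	 */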
	movq	%rdi,TF_RDI(%rsp)	/* arg 1 */
	movq	%rsi,TF_RSI(%rsp)	/* arg 2 */
	movq	%rdx,TF_RDX(%rsp)	/* arg 3 */
	movq	%r10,TF_RCX(%rsp)	/* arg 4 */
	movq	%r8,TF_R8(%rsp)		/* arg 5 */
	movq	%r9,TF_R9(%rsp)		/* arg 6 */
	movq	%rax,TF_RAX(%rsp)	/* syscall number */
	movq	%rbx,TF_RBX(%rsp)	/* C preserved */
	movq	%rbp,TF_RBP(%rsp)	/* C preserved */
	movq	%r12,TF_R12(%rsp)	/* C preserved */
	movq	%r13,TF_R13(%rsp)	/* C preserved */
	movq	%r14,TF_R14(%rsp)	/* C preserved */
	movq	%r15,TF_R15(%rsp)	/* C preserved */
	FAKE_MCOUNT(TF_RIP(%rsp))
	movq	%rsp, %rdi
	call	syscall
	movq	PCPU(CURPCB),%rax
	testq	$PCB_FULLCTX,PCB_FLAGS(%rax)
	jne	3f
1:	/* Check for and handle AST's on return to userland */
	cli
	movq	PCPU(CURTHREAD),%rax
	testl	$TDF_ASTPENDING | TDF_NEEDRESCHED,TD_FLAGS(%rax)
	je	2f
	sti
	movq	%rsp, %rdi
	call	ast
	jmp	1b
2:	/* restore preserved registers */
	MEXITCOUNT
	movq	TF_RDI(%rsp),%rdi	/* bonus: preserve arg 1 */
	movq	TF_RSI(%rsp),%rsi	/* bonus: preserve arg 2 */
	movq	TF_RDX(%rsp),%rdx	/* return value 2 */
	movq	TF_RAX(%rsp),%rax	/* return value 1 */
	movq	TF_RBX(%rsp),%rbx	/* C preserved */
	movq	TF_RBP(%rsp),%rbp	/* C preserved */
	movq	TF_R12(%rsp),%r12	/* C preserved */
	movq	TF_R13(%rsp),%r13	/* C preserved */
	movq	TF_R14(%rsp),%r14	/* C preserved */
	movq	TF_R15(%rsp),%r15	/* C preserved */
	movq	TF_RFLAGS(%rsp),%r11	/* original %rflags */
	movq	TF_RIP(%rsp),%rcx	/* original %rip */
	movq	TF_RSP(%rsp),%r9	/* user stack pointer */
	movq	%r9,%rsp		/* original %rsp */
	swapgs
	sysretq				/* %rip <- %rcx, %rflags <- %r11 */
3:	/* Requested full context restore, use doreti for that */
	andq	$~PCB_FULLCTX,PCB_FLAGS(%rax)
	MEXITCOUNT
	jmp	doreti

/*
 * Here for CYA insurance, in case a "syscall" instruction gets
 * issued from 32 bit compatibility mode.  MSR_CSTAR has to point
 * to *something* if EFER_SCE is enabled.
 */
IDTVEC(fast_syscall32)
	sysret

/*
 * NMI handling is special.
 *
 * First, NMIs do not respect the state of the processor's RFLAGS.IF
 * bit.  The NMI handler may be entered at any time, including when
 * the processor is in a critical section with RFLAGS.IF == 0.
 * The processor's GS.base value could be invalid on entry to the
 * handler.
 *
 * Second, the processor treats NMIs specially, blocking further NMIs
 * until an 'iretq' instruction is executed.  We therefore need to
 * execute the NMI handler with interrupts disabled, to prevent a
 * nested interrupt from executing an 'iretq' instruction and
 * inadvertently taking the processor out of NMI mode.
 *
 * Third, the NMI handler runs on its own stack (tss_ist2).  The
 * canonical GS.base value for the processor is stored just above the
 * bottom of its NMI stack.  For NMIs taken from kernel mode, the
 * current value of the processor's GS.base is saved at entry in the
 * C-preserved register %r12, the canonical value for GS.base is then
 * loaded into the processor, and the saved value is restored at exit
 * time.  For NMIs taken from user mode, the cheaper 'swapgs'
 * instruction is used to swap GS.base.
 */

IDTVEC(nmi)
	subq	$TF_RIP,%rsp
	movq	$(T_NMI),TF_TRAPNO(%rsp)
	movq	$0,TF_ADDR(%rsp)
	movq	$0,TF_ERR(%rsp)
	movq	%rdi,TF_RDI(%rsp)
	movq	%rsi,TF_RSI(%rsp)
	movq	%rdx,TF_RDX(%rsp)
	movq	%rcx,TF_RCX(%rsp)
	movq	%r8,TF_R8(%rsp)
	movq	%r9,TF_R9(%rsp)
	movq	%rax,TF_RAX(%rsp)
	movq	%rbx,TF_RBX(%rsp)
	movq	%rbp,TF_RBP(%rsp)
	movq	%r10,TF_R10(%rsp)
	movq	%r11,TF_R11(%rsp)
	movq	%r12,TF_R12(%rsp)
	movq	%r13,TF_R13(%rsp)
	movq	%r14,TF_R14(%rsp)
	movq	%r15,TF_R15(%rsp)
	xorl	%ebx,%ebx		/* %ebx != 0 below means NMI from userspace */
	testb	$SEL_RPL_MASK,TF_CS(%rsp)
	jnz	nmi_fromuserspace
	/*
	 * We've interrupted the kernel.  Preserve GS.base in %r12.
	 */
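	/* rdmsr returns the 64-bit MSR value split across %edx:%eax. */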
	movl	$MSR_GSBASE,%ecx
	rdmsr
	movq	%rax,%r12
	shlq	$32,%rdx
	orq	%rdx,%r12
	/* Retrieve and load the canonical value for GS.base. */
	movq	TF_SIZE(%rsp),%rdx
	movl	%edx,%eax
	shrq	$32,%rdx
	wrmsr
	jmp	nmi_calltrap
nmi_fromuserspace:
	incl	%ebx			/* flag: NMI came from userspace */
	swapgs
/* Note: this label is also used by ddb and gdb: */
nmi_calltrap:
	FAKE_MCOUNT(TF_RIP(%rsp))
	movq	%rsp, %rdi
	call	trap
	MEXITCOUNT
#ifdef HWPMC_HOOKS
	/*
	 * Capture a userspace callchain if needed.
	 *
	 * - Check if the current trap was from user mode.
	 * - Check if the current thread is valid.
	 * - Check if the thread requires a user call chain to be
	 *   captured.
	 *
	 * We are still in NMI mode at this point.
	 */
	testl	%ebx,%ebx
	jz	nocallchain		/* not from userspace */
	movq	PCPU(CURTHREAD),%rax
	orq	%rax,%rax		/* curthread present? */
	jz	nocallchain
	testl	$TDP_CALLCHAIN,TD_PFLAGS(%rax) /* flagged for capture? */
	jz	nocallchain
	/*
	 * A user callchain is to be captured, so:
	 * - Move execution to the regular kernel stack, to allow for
	 *   nested NMI interrupts.
	 * - Take the processor out of "NMI" mode by faking an "iret".
	 * - Enable interrupts, so that copyin() can work.
	 */
	movq	%rsp,%rsi		/* source stack pointer */
	movq	$TF_SIZE,%rcx
	movq	PCPU(RSP0),%rdx
	subq	%rcx,%rdx
	movq	%rdx,%rdi		/* destination stack pointer */

	shrq	$3,%rcx			/* trap frame size in long words */
	cld
	rep
	movsq				/* copy trapframe */

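	/*
	 * Build an iretq frame on the NMI stack whose saved %rsp points
	 * at the trapframe copy on the kernel stack; the iretq below thus
	 * leaves NMI mode and switches to the kernel stack in one step.
	 */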
	movl	%ss,%eax
	pushq	%rax			/* tf_ss */
	pushq	%rdx			/* tf_rsp (on kernel stack) */
	pushfq				/* tf_rflags */
	movl	%cs,%eax
	pushq	%rax			/* tf_cs */
	pushq	$outofnmi		/* tf_rip */
	iretq
outofnmi:
	/*
	 * At this point the processor has exited NMI mode and is running
	 * with interrupts turned off on the normal kernel stack.
	 *
	 * If a pending NMI gets recognized at or after this point, it
	 * will cause a kernel callchain to be traced.
	 *
	 * We turn interrupts back on, and call the user callchain capture hook.
	 */
	movq	pmc_hook,%rax
	orq	%rax,%rax
	jz	nocallchain
	movq	PCPU(CURTHREAD),%rdi	/* thread */
	movq	$PMC_FN_USER_CALLCHAIN,%rsi /* command */
	movq	%rsp,%rdx		/* frame */
	sti
	call	*%rax
	cli
nocallchain:
#endif
	testl	%ebx,%ebx
	jz	nmi_kernelexit
	swapgs
	jmp	nmi_restoreregs
nmi_kernelexit:
	/*
	 * Put back the preserved MSR_GSBASE value.
	 */
	movl	$MSR_GSBASE,%ecx
	movq	%r12,%rdx
	movl	%edx,%eax
	shrq	$32,%rdx
	wrmsr
nmi_restoreregs:
	movq	TF_RDI(%rsp),%rdi
	movq	TF_RSI(%rsp),%rsi
	movq	TF_RDX(%rsp),%rdx
	movq	TF_RCX(%rsp),%rcx
	movq	TF_R8(%rsp),%r8
	movq	TF_R9(%rsp),%r9
	movq	TF_RAX(%rsp),%rax
	movq	TF_RBX(%rsp),%rbx
	movq	TF_RBP(%rsp),%rbp
	movq	TF_R10(%rsp),%r10
	movq	TF_R11(%rsp),%r11
	movq	TF_R12(%rsp),%r12
	movq	TF_R13(%rsp),%r13
	movq	TF_R14(%rsp),%r14
	movq	TF_R15(%rsp),%r15
	addq	$TF_RIP,%rsp
	iretq

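/*
 * fork_trampoline: the first code executed by a newly forked thread.
 * cpu_fork() arranges for %r12 and %rbx to hold the start function and
 * its argument (see the register moves below).  The C-side contract,
 * as a sketch (the actual definition lives in sys/kern/kern_fork.c):
 *
 *	void fork_exit(void (*callout)(void *, struct trapframe *),
 *	    void *arg, struct trapframe *frame);
 */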
ENTRY(fork_trampoline)
	movq	%r12, %rdi		/* function */
	movq	%rbx, %rsi		/* arg1 */
	movq	%rsp, %rdx		/* trapframe pointer */
	call	fork_exit
	MEXITCOUNT
	jmp	doreti			/* Handle any ASTs */

/*
 * To efficiently implement classification of trap and interrupt handlers
 * for profiling, there must be only trap handlers between the labels btrap
 * and bintr, and only interrupt handlers between the labels bintr and
 * eintr.  This is implemented (partly) by including files that contain
 * some of the handlers.  Before including the files, set up a normal asm
 * environment so that the included files don't need to know that they are
 * included.
 */

#ifdef COMPAT_IA32
	.data
	.p2align 4
	.text
	SUPERALIGN_TEXT

#include <amd64/ia32/ia32_exception.S>
#endif

	.data
	.p2align 4
	.text
	SUPERALIGN_TEXT
MCOUNT_LABEL(bintr)

#include <amd64/amd64/apic_vector.S>

#ifdef DEV_ATPIC
	.data
	.p2align 4
	.text
	SUPERALIGN_TEXT

#include <amd64/isa/atpic_vector.S>
#endif

	.text
MCOUNT_LABEL(eintr)

/*
 * void doreti(struct trapframe)
 *
 * Handle return from interrupts, traps and syscalls.
 */
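/*
 * Despite the C-style prototype above, doreti is reached by 'jmp',
 * not 'call', with the trapframe at the top of the stack.
 */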
	.text
	SUPERALIGN_TEXT
	.type	doreti,@function
doreti:
	FAKE_MCOUNT($bintr)		/* init "from" bintr -> doreti */
	/*
	 * Check if ASTs can be handled now.
	 */
	testb	$SEL_RPL_MASK,TF_CS(%rsp) /* are we returning to user mode? */
	jz	doreti_exit		/* can't handle ASTs now if not */

doreti_ast:
	/*
	 * Check for ASTs atomically with returning.  Disabling CPU
	 * interrupts provides sufficient locking even in the SMP case,
	 * since we will be informed of any new ASTs by an IPI.
	 */
	cli
	movq	PCPU(CURTHREAD),%rax
	testl	$TDF_ASTPENDING | TDF_NEEDRESCHED,TD_FLAGS(%rax)
	je	doreti_exit
	sti
	movq	%rsp, %rdi		/* pass a pointer to the trapframe */
	call	ast
	jmp	doreti_ast

	/*
	 * doreti_exit: pop registers, iretq.
	 *
	 * The iretq is a special case, since it may fault if (for
	 * example) a sigreturn specifies bad segment selectors.  The
	 * fault is caught via doreti_iret_fault below and handled in
	 * trap.c.
	 */
doreti_exit:
	MEXITCOUNT
	movq	TF_RDI(%rsp),%rdi
	movq	TF_RSI(%rsp),%rsi
	movq	TF_RDX(%rsp),%rdx
	movq	TF_RCX(%rsp),%rcx
	movq	TF_R8(%rsp),%r8
	movq	TF_R9(%rsp),%r9
	movq	TF_RAX(%rsp),%rax
	movq	TF_RBX(%rsp),%rbx
	movq	TF_RBP(%rsp),%rbp
	movq	TF_R10(%rsp),%r10
	movq	TF_R11(%rsp),%r11
	movq	TF_R12(%rsp),%r12
	movq	TF_R13(%rsp),%r13
	movq	TF_R14(%rsp),%r14
	movq	TF_R15(%rsp),%r15
	testb	$SEL_RPL_MASK,TF_CS(%rsp) /* Did we come from kernel? */
	jz	1f			/* keep running with kernel GS.base */
	cli
	swapgs
1:	addq	$TF_RIP,%rsp		/* skip over tf_err, tf_trapno */
	.globl	doreti_iret
doreti_iret:
	iretq

	/*
	 * doreti_iret_fault.  Alternative return path for the case where
	 * we get a fault in the doreti_exit code above.  trap()
	 * (amd64/amd64/trap.c) catches this specific case, sends the
	 * process a signal and continues in the corresponding place in
	 * the code below.
	 */
	ALIGN_TEXT
	.globl	doreti_iret_fault
doreti_iret_fault:
	subq	$TF_RIP,%rsp		/* space including tf_err, tf_trapno */
	testl	$PSL_I,TF_RFLAGS(%rsp)
	jz	1f
	sti
1:	movq	%rdi,TF_RDI(%rsp)
	movq	%rsi,TF_RSI(%rsp)
	movq	%rdx,TF_RDX(%rsp)
	movq	%rcx,TF_RCX(%rsp)
	movq	%r8,TF_R8(%rsp)
	movq	%r9,TF_R9(%rsp)
	movq	%rax,TF_RAX(%rsp)
	movq	%rbx,TF_RBX(%rsp)
	movq	%rbp,TF_RBP(%rsp)
	movq	%r10,TF_R10(%rsp)
	movq	%r11,TF_R11(%rsp)
	movq	%r12,TF_R12(%rsp)
	movq	%r13,TF_R13(%rsp)
	movq	%r14,TF_R14(%rsp)
	movq	%r15,TF_R15(%rsp)
	movq	$T_PROTFLT,TF_TRAPNO(%rsp)
	movq	$0,TF_ERR(%rsp)		/* XXX should be the error code */
	movq	$0,TF_ADDR(%rsp)
	FAKE_MCOUNT(TF_RIP(%rsp))
	jmp	calltrap
#ifdef HWPMC_HOOKS
ENTRY(end_exceptions)
#endif