1 /*-
2 * Copyright (c) 1989, 1990 William F. Jolitz.
3 * Copyright (c) 1990 The Regents of the University of California.
4 * Copyright (c) 2007-2018 The FreeBSD Foundation
5 * All rights reserved.
6 *
7 * Portions of this software were developed by A. Joseph Koshy under
8 * sponsorship from the FreeBSD Foundation and Google, Inc.
9 *
10 * Portions of this software were developed by
11 * Konstantin Belousov <kib@FreeBSD.org> under sponsorship from
12 * the FreeBSD Foundation.
13 *
14 * Redistribution and use in source and binary forms, with or without
15 * modification, are permitted provided that the following conditions
16 * are met:
17 * 1. Redistributions of source code must retain the above copyright
18 * notice, this list of conditions and the following disclaimer.
19 * 2. Redistributions in binary form must reproduce the above copyright
20 * notice, this list of conditions and the following disclaimer in the
21 * documentation and/or other materials provided with the distribution.
22 * 4. Neither the name of the University nor the names of its contributors
23 * may be used to endorse or promote products derived from this software
24 * without specific prior written permission.
25 *
26 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
27 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
28 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
29 * ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
30 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
31 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
32 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
33 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
34 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
35 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
36 * SUCH DAMAGE.
37 *
38 * $FreeBSD: releng/11.2/sys/amd64/amd64/exception.S 343789 2019-02-05 18:07:45Z emaste $
39 */
40
41 #include "opt_atpic.h"
42 #include "opt_compat.h"
43 #include "opt_hwpmc_hooks.h"
44
45 #include "assym.s"
46
47 #include <machine/asmacros.h>
48 #include <machine/psl.h>
49 #include <machine/trap.h>
50 #include <machine/specialreg.h>
51
#ifdef KDTRACE_HOOKS
	.bss
	/*
	 * Pointer slots filled in at runtime by the DTrace module.
	 * dtrace_invop_jump_addr: if non-zero, #BP traps from kernel mode
	 * are forwarded to this hook (see the KDTRACE_HOOKS code in
	 * alltraps below).  dtrace_invop_calltrap_addr: where the hook
	 * jumps back when the breakpoint was not a DTrace probe; set to
	 * 'calltrap' before we transfer control to the hook.
	 */
	.globl	dtrace_invop_jump_addr
	.align	8
	.type	dtrace_invop_jump_addr,@object
	.size	dtrace_invop_jump_addr,8
dtrace_invop_jump_addr:
	.zero	8
	.globl	dtrace_invop_calltrap_addr
	.align	8
	.type	dtrace_invop_calltrap_addr,@object
	.size	dtrace_invop_calltrap_addr,8
dtrace_invop_calltrap_addr:
	.zero	8
#endif
	.text
#ifdef HWPMC_HOOKS
	/* Marker label: start of exception-handler text, for hwpmc(4). */
ENTRY(start_exceptions)
#endif
71
72 /*****************************************************************************/
73 /* Trap handling */
74 /*****************************************************************************/
75 /*
76 * Trap and fault vector routines.
77 *
78 * All traps are 'interrupt gates', SDT_SYSIGT. An interrupt gate pushes
79 * state on the stack but also disables interrupts. This is important for
80 * us for the use of the swapgs instruction. We cannot be interrupted
81 * until the GS.base value is correct. For most traps, we automatically
82 * then enable interrupts if the interrupted context had them enabled.
83 * This is equivalent to the i386 port's use of SDT_SYS386TGT.
84 *
85 * The cpu will push a certain amount of state onto the kernel stack for
86 * the current process. See amd64/include/frame.h.
87 * This includes the current RFLAGS (status register, which includes
88 * the interrupt disable state prior to the trap), the code segment register,
89 * and the return instruction pointer are pushed by the cpu. The cpu
90 * will also push an 'error' code for certain traps. We push a dummy
91 * error code for those traps where the cpu doesn't in order to maintain
92 * a consistent frame. We also push a contrived 'trap number'.
93 *
94 * The CPU does not push the general registers, so we must do that, and we
95 * must restore them prior to calling 'iret'. The CPU adjusts %cs and %ss
 * but does not mess with %ds, %es, %gs or %fs.  We swap the %gs base
 * for kernel mode operation shortly, without changes to the selector
 * loaded.  Since superuser long mode works with any selectors loaded into
 * segment registers other than %cs, which makes them mostly unused in long
 * mode, and kernel does not reference %fs, leave them alone.  The segment
101 * registers are reloaded on return to the usermode.
102 */
103
MCOUNT_LABEL(user)
MCOUNT_LABEL(btrap)

/*
 * Traps that we leave interrupts disabled for.  The CPU pushes no
 * error code for these, so a dummy tf_err (and tf_addr) is stored to
 * keep the trapframe layout consistent.  Control joins alltraps_noen,
 * which does not re-enable interrupts.
 */
	.macro	TRAP_NOEN	l, trapno
	PTI_ENTRY	\l,X\l
	.globl	X\l
	.type	X\l,@function
X\l:	subq	$TF_RIP,%rsp		/* room for frame below hw-pushed regs */
	movl	$\trapno,TF_TRAPNO(%rsp)
	movq	$0,TF_ADDR(%rsp)
	movq	$0,TF_ERR(%rsp)		/* dummy error code */
	jmp	alltraps_noen
	.endm

	TRAP_NOEN	bpt, T_BPTFLT
#ifdef KDTRACE_HOOKS
	TRAP_NOEN	dtrace_ret, T_DTRACE_RET
#endif
123
/*
 * Regular traps; the CPU does not supply tf_err for these, so push a
 * dummy error code (and address) before joining the common alltraps
 * path, which may re-enable interrupts.
 */
	.macro	TRAP	l, trapno
	PTI_ENTRY	\l,X\l
	.globl	X\l
	.type	X\l,@function
X\l:
	subq	$TF_RIP,%rsp		/* room for frame below hw-pushed regs */
	movl	$\trapno,TF_TRAPNO(%rsp)
	movq	$0,TF_ADDR(%rsp)
	movq	$0,TF_ERR(%rsp)		/* dummy error code */
	jmp	alltraps
	.endm

	TRAP	div, T_DIVIDE
	TRAP	ofl, T_OFLOW
	TRAP	bnd, T_BOUND
	TRAP	ill, T_PRIVINFLT
	TRAP	dna, T_DNA
	TRAP	fpusegm, T_FPOPFLT
	TRAP	rsvd, T_RESERVED
	TRAP	fpu, T_ARITHTRAP
	TRAP	xmm, T_XMMFLT
146
/*
 * These traps have tf_err already pushed by the CPU, so only the
 * space down to tf_err is reserved and no dummy error code is stored.
 */
	.macro	TRAP_ERR	l, trapno
	PTI_ENTRY	\l,X\l,has_err=1
	.globl	X\l
	.type	X\l,@function
X\l:
	subq	$TF_ERR,%rsp		/* hw already pushed the error code */
	movl	$\trapno,TF_TRAPNO(%rsp)
	movq	$0,TF_ADDR(%rsp)
	jmp	alltraps
	.endm

	TRAP_ERR	tss, T_TSSFLT
	TRAP_ERR	align, T_ALIGNFLT
161
/*
 * alltraps entry point.  Use swapgs if this is the first time in the
 * kernel from userland.  Reenable interrupts if they were enabled
 * before the trap.  This approximates SDT_SYS386TGT on the i386 port.
 *
 * On entry %rsp points at a trapframe whose trapno/err/addr fields
 * were filled by the TRAP* macros; the GP registers are still live
 * and are spilled into the frame here.
 */
	SUPERALIGN_TEXT
	.globl	alltraps
	.type	alltraps,@function
alltraps:
	movq	%rdi,TF_RDI(%rsp)	/* free %rdi for use as scratch */
	testb	$SEL_RPL_MASK,TF_CS(%rsp) /* Did we come from kernel? */
	jz	1f			/* already running with kernel GS.base */
	swapgs
	movq	PCPU(CURPCB),%rdi
	andl	$~PCB_FULL_IRET,PCB_FLAGS(%rdi) /* default to fast return */
1:	SAVE_SEGS
	movq	%rdx,TF_RDX(%rsp)
	movq	%rax,TF_RAX(%rsp)
	movq	%rcx,TF_RCX(%rsp)
	testb	$SEL_RPL_MASK,TF_CS(%rsp)
	jz	2f
	call	handle_ibrs_entry	/* speculation mitigation, user entry only */
2:	testl	$PSL_I,TF_RFLAGS(%rsp)
	jz	alltraps_pushregs_no_rax
	sti				/* interrupted context had IF set */
alltraps_pushregs_no_rax:
	/* Spill the remaining GP registers into the trapframe. */
	movq	%rsi,TF_RSI(%rsp)
	movq	%r8,TF_R8(%rsp)
	movq	%r9,TF_R9(%rsp)
	movq	%rbx,TF_RBX(%rsp)
	movq	%rbp,TF_RBP(%rsp)
	movq	%r10,TF_R10(%rsp)
	movq	%r11,TF_R11(%rsp)
	movq	%r12,TF_R12(%rsp)
	movq	%r13,TF_R13(%rsp)
	movq	%r14,TF_R14(%rsp)
	movq	%r15,TF_R15(%rsp)
	movl	$TF_HASSEGS,TF_FLAGS(%rsp)
	cld
	FAKE_MCOUNT(TF_RIP(%rsp))
#ifdef KDTRACE_HOOKS
	/*
	 * DTrace Function Boundary Trace (fbt) probes are triggered
	 * by int3 (0xcc) which causes the #BP (T_BPTFLT) breakpoint
	 * interrupt.  For all other trap types, just handle them in
	 * the usual way.
	 */
	testb	$SEL_RPL_MASK,TF_CS(%rsp) /* Did we come from kernel? */
	jnz	calltrap		/* ignore userland traps */
	cmpl	$T_BPTFLT,TF_TRAPNO(%rsp)
	jne	calltrap

	/* Check if there is no DTrace hook registered. */
	cmpq	$0,dtrace_invop_jump_addr
	je	calltrap

	/*
	 * Set our jump address for the jump back in the event that
	 * the breakpoint wasn't caused by DTrace at all.
	 */
	movq	$calltrap,dtrace_invop_calltrap_addr(%rip)

	/* Jump to the code hooked in by DTrace. */
	jmpq	*dtrace_invop_jump_addr
#endif
	.globl	calltrap
	.type	calltrap,@function
calltrap:
	movq	%rsp,%rdi		/* arg: trapframe pointer */
	call	trap_check
	MEXITCOUNT
	jmp	doreti			/* Handle any pending ASTs */
234
/*
 * alltraps_noen entry point.  Unlike alltraps above, we want to
 * leave the interrupts disabled.  This corresponds to
 * SDT_SYS386IGT on the i386 port.  Used for #BP and the DTrace
 * return trap, where re-enabling interrupts before trap() would
 * be unsafe.
 */
	SUPERALIGN_TEXT
	.globl	alltraps_noen
	.type	alltraps_noen,@function
alltraps_noen:
	movq	%rdi,TF_RDI(%rsp)	/* free %rdi for use as scratch */
	testb	$SEL_RPL_MASK,TF_CS(%rsp) /* Did we come from kernel? */
	jz	1f			/* already running with kernel GS.base */
	swapgs
	movq	PCPU(CURPCB),%rdi
	andl	$~PCB_FULL_IRET,PCB_FLAGS(%rdi)
1:	SAVE_SEGS
	movq	%rdx,TF_RDX(%rsp)
	movq	%rax,TF_RAX(%rsp)
	movq	%rcx,TF_RCX(%rsp)
	testb	$SEL_RPL_MASK,TF_CS(%rsp)
	jz	alltraps_pushregs_no_rax
	call	handle_ibrs_entry	/* user entry: apply IBRS mitigation */
	jmp	alltraps_pushregs_no_rax /* note: no sti — interrupts stay off */
258
/*
 * Double fault (#DF).  Builds a complete trapframe by hand, switches
 * to the kernel GS.base and (when PTI is active) the kernel page
 * table, then calls dblfault_handler.  The handler is not expected
 * to return; we halt forever afterwards.
 */
IDTVEC(dblfault)
	subq	$TF_ERR,%rsp		/* CPU pushed an error code for #DF */
	movl	$T_DOUBLEFLT,TF_TRAPNO(%rsp)
	movq	$0,TF_ADDR(%rsp)
	movq	$0,TF_ERR(%rsp)
	movq	%rdi,TF_RDI(%rsp)
	movq	%rsi,TF_RSI(%rsp)
	movq	%rdx,TF_RDX(%rsp)
	movq	%rcx,TF_RCX(%rsp)
	movq	%r8,TF_R8(%rsp)
	movq	%r9,TF_R9(%rsp)
	movq	%rax,TF_RAX(%rsp)
	movq	%rbx,TF_RBX(%rsp)
	movq	%rbp,TF_RBP(%rsp)
	movq	%r10,TF_R10(%rsp)
	movq	%r11,TF_R11(%rsp)
	movq	%r12,TF_R12(%rsp)
	movq	%r13,TF_R13(%rsp)
	movq	%r14,TF_R14(%rsp)
	movq	%r15,TF_R15(%rsp)
	SAVE_SEGS
	movl	$TF_HASSEGS,TF_FLAGS(%rsp)
	cld
	testb	$SEL_RPL_MASK,TF_CS(%rsp) /* Did we come from kernel? */
	jz	1f			/* already running with kernel GS.base */
	swapgs
1:
	movq	PCPU(KCR3),%rax
	cmpq	$~0,%rax		/* ~0 sentinel => PTI not active */
	je	2f
	movq	%rax,%cr3		/* switch to kernel page table */
2:	movq	%rsp,%rdi		/* arg: trapframe pointer */
	call	dblfault_handler
3:	hlt				/* never returns; spin in hlt */
	jmp	3b
294
/*
 * Page fault (#PF).  The PTI entry (Xpage_pti) runs on the trampoline
 * stack when coming from userland: it preserves the user %cr3 in
 * PCPU(SAVED_UCR3), switches stacks/page tables via PTI_UUENTRY and
 * then joins the common path.  %cr2 (fault address) must be read and
 * saved into tf_addr before interrupts can be re-enabled.
 */
	ALIGN_TEXT
IDTVEC(page_pti)
	testb	$SEL_RPL_MASK,PTI_CS-2*8(%rsp) /* from kernel? */
	jz	Xpage			/* yes: plain entry, no cr3 switch */
	swapgs
	pushq	%rax
	pushq	%rdx
	movq	%cr3,%rax
	movq	%rax,PCPU(SAVED_UCR3)	/* remember user page table root */
	PTI_UUENTRY has_err=1
	subq	$TF_ERR,%rsp
	movq	%rdi,TF_RDI(%rsp)
	movq	%rax,TF_RAX(%rsp)
	movq	%rdx,TF_RDX(%rsp)
	movq	%rcx,TF_RCX(%rsp)
	jmp	page_u
IDTVEC(page)
	subq	$TF_ERR,%rsp		/* CPU pushed an error code for #PF */
	movq	%rdi,TF_RDI(%rsp)	/* free up GP registers */
	movq	%rax,TF_RAX(%rsp)
	movq	%rdx,TF_RDX(%rsp)
	movq	%rcx,TF_RCX(%rsp)
	testb	$SEL_RPL_MASK,TF_CS(%rsp) /* Did we come from kernel? */
	jz	page_cr2		/* already running with kernel GS.base */
	swapgs
page_u:	movq	PCPU(CURPCB),%rdi
	andl	$~PCB_FULL_IRET,PCB_FLAGS(%rdi)
	movq	PCPU(SAVED_UCR3),%rax
	movq	%rax,PCB_SAVED_UCR3(%rdi) /* stash user cr3 in the pcb */
	call	handle_ibrs_entry
page_cr2:
	movq	%cr2,%rdi		/* preserve %cr2 before .. */
	movq	%rdi,TF_ADDR(%rsp)	/* enabling interrupts. */
	SAVE_SEGS
	movl	$T_PAGEFLT,TF_TRAPNO(%rsp)
	testl	$PSL_I,TF_RFLAGS(%rsp)
	jz	alltraps_pushregs_no_rax
	sti
	jmp	alltraps_pushregs_no_rax
334
335 /*
336 * We have to special-case this one. If we get a trap in doreti() at
337 * the iretq stage, we'll reenter with the wrong gs state. We'll have
 * to do a special swapgs in this case, even when coming from the kernel.
339 * XXX linux has a trap handler for their equivalent of load_gs().
340 *
341 * On the stack, we have the hardware interrupt frame to return
342 * to usermode (faulted) and another frame with error code, for
343 * fault. For PTI, copy both frames to the main thread stack.
344 */
/*
 * Entry macro for #NP/#SS/#GP, which can also be raised by the iretq
 * in doreti when returning to userland with bad segment state.  In
 * that case the stack holds two frames (the faulted-usermode frame
 * and the new fault frame); the _pti_doreti path copies both onto
 * the thread's kernel stack before re-dispatching to the plain
 * handler via X\name.
 */
	.macro	PROTF_ENTRY	name,trapno
\name\()_pti_doreti:
	pushq	%rax
	pushq	%rdx
	swapgs
	movq	PCPU(KCR3),%rax
	movq	%rax,%cr3		/* switch to kernel page table */
	movq	PCPU(RSP0),%rax		/* top of thread kernel stack */
	subq	$2*PTI_SIZE-3*8,%rax	/* no err, %rax, %rdx in faulted frame */
	MOVE_STACKS	(PTI_SIZE / 4 - 3)
	movq	%rax,%rsp		/* now on the thread kernel stack */
	popq	%rdx
	popq	%rax
	swapgs				/* restore user GS.base; X\name redoes it */
	jmp	X\name
IDTVEC(\name\()_pti)
	cmpq	$doreti_iret,PTI_RIP-2*8(%rsp) /* faulted at doreti's iretq? */
	je	\name\()_pti_doreti
	testb	$SEL_RPL_MASK,PTI_CS-2*8(%rsp) /* %rax, %rdx not yet pushed */
	jz	X\name
	PTI_UENTRY has_err=1
	swapgs
IDTVEC(\name)
	subq	$TF_ERR,%rsp		/* CPU pushed an error code */
	movl	$\trapno,TF_TRAPNO(%rsp)
	jmp	prot_addrf
	.endm

	PROTF_ENTRY	missing, T_SEGNPFLT
	PROTF_ENTRY	stk, T_STKFLT
	PROTF_ENTRY	prot, T_PROTFLT
376
/*
 * Common tail for the protection faults.  Besides the usual spill,
 * it handles the doreti_iret special case (kernel %cs but user
 * GS.base — see label 5) and, on FSGSBASE-capable CPUs, snapshots
 * the user %fs/%gs base into the pcb when the 32-bit TLS selectors
 * (KUF32SEL/KUG32SEL) are loaded, since the fault may have clobbered
 * in-flight segment state.  Always forces a full iret return.
 */
prot_addrf:
	movq	$0,TF_ADDR(%rsp)
	movq	%rdi,TF_RDI(%rsp)	/* free up a GP register */
	movq	%rax,TF_RAX(%rsp)
	movq	%rdx,TF_RDX(%rsp)
	movq	%rcx,TF_RCX(%rsp)
	movw	%fs,TF_FS(%rsp)
	movw	%gs,TF_GS(%rsp)
	leaq	doreti_iret(%rip),%rdi
	cmpq	%rdi,TF_RIP(%rsp)
	je	5f			/* kernel but with user gsbase!! */
	testb	$SEL_RPL_MASK,TF_CS(%rsp) /* Did we come from kernel? */
	jz	6f			/* already running with kernel GS.base */
	testb	$CPUID_STDEXT_FSGSBASE,cpu_stdext_feature(%rip)
	jz	2f			/* no FSGSBASE insns on this CPU */
	cmpw	$KUF32SEL,TF_FS(%rsp)
	jne	1f
	rdfsbase %rax			/* read user fsbase before swapgs */
1:	cmpw	$KUG32SEL,TF_GS(%rsp)
	jne	2f
	rdgsbase %rdx			/* read user gsbase before swapgs */
2:	swapgs
	movq	PCPU(CURPCB),%rdi
	testb	$CPUID_STDEXT_FSGSBASE,cpu_stdext_feature(%rip)
	jz	4f
	cmpw	$KUF32SEL,TF_FS(%rsp)
	jne	3f
	movq	%rax,PCB_FSBASE(%rdi)	/* preserve user fsbase in pcb */
3:	cmpw	$KUG32SEL,TF_GS(%rsp)
	jne	4f
	movq	%rdx,PCB_GSBASE(%rdi)	/* preserve user gsbase in pcb */
4:	call	handle_ibrs_entry
	orl	$PCB_FULL_IRET,PCB_FLAGS(%rdi)	/* always full iret from GPF */
	movw	%es,TF_ES(%rsp)
	movw	%ds,TF_DS(%rsp)
	testl	$PSL_I,TF_RFLAGS(%rsp)
	jz	alltraps_pushregs_no_rax
	sti
	jmp	alltraps_pushregs_no_rax

5:	swapgs				/* doreti_iret fault: fix GS.base */
6:	movq	PCPU(CURPCB),%rdi
	jmp	4b
420
421 /*
422 * Fast syscall entry point. We enter here with just our new %cs/%ss set,
 * and the new privilege level.  We are still running on the old user stack
424 * pointer. We have to juggle a few things around to find our stack etc.
425 * swapgs gives us access to our PCPU space only.
426 *
 * We do not support invoking this from custom segment registers,
428 * esp. %cs, %ss, %fs, %gs, e.g. using entries from an LDT.
429 */
	SUPERALIGN_TEXT
/*
 * 64-bit SYSCALL entry.  The PTI variant first switches from the
 * user to the kernel page table (scratching %rax via the per-CPU
 * save slot), then both variants build a trapframe by hand on the
 * thread kernel stack (SYSCALL leaves us on the user stack) and call
 * amd64_syscall().  Return is normally via SYSRET; doreti is used
 * only when PCB_FULL_IRET was requested.
 */
IDTVEC(fast_syscall_pti)
	swapgs
	movq	%rax,PCPU(SCRATCH_RAX)
	movq	PCPU(KCR3),%rax
	movq	%rax,%cr3		/* switch to kernel page table */
	jmp	fast_syscall_common
	SUPERALIGN_TEXT
IDTVEC(fast_syscall)
	swapgs
	movq	%rax,PCPU(SCRATCH_RAX)
fast_syscall_common:
	movq	%rsp,PCPU(SCRATCH_RSP)	/* remember user stack pointer */
	movq	PCPU(RSP0),%rsp		/* switch to thread kernel stack */
	/* Now emulate a trapframe. Make the 8 byte alignment odd for call. */
	subq	$TF_SIZE,%rsp
	/* defer TF_RSP till we have a spare register */
	movq	%r11,TF_RFLAGS(%rsp)	/* SYSCALL put rflags in %r11 */
	movq	%rcx,TF_RIP(%rsp)	/* %rcx original value is in %r10 */
	movq	PCPU(SCRATCH_RSP),%r11	/* %r11 already saved */
	movq	%r11,TF_RSP(%rsp)	/* user stack pointer */
	movq	PCPU(SCRATCH_RAX),%rax
	movq	%rax,TF_RAX(%rsp)	/* syscall number */
	movq	%rdx,TF_RDX(%rsp)	/* arg 3 */
	SAVE_SEGS
	call	handle_ibrs_entry
	movq	PCPU(CURPCB),%r11
	andl	$~PCB_FULL_IRET,PCB_FLAGS(%r11)
	sti				/* frame is usable; allow interrupts */
	movq	$KUDSEL,TF_SS(%rsp)
	movq	$KUCSEL,TF_CS(%rsp)
	movq	$2,TF_ERR(%rsp)
	movq	%rdi,TF_RDI(%rsp)	/* arg 1 */
	movq	%rsi,TF_RSI(%rsp)	/* arg 2 */
	movq	%r10,TF_RCX(%rsp)	/* arg 4 */
	movq	%r8,TF_R8(%rsp)		/* arg 5 */
	movq	%r9,TF_R9(%rsp)		/* arg 6 */
	movq	%rbx,TF_RBX(%rsp)	/* C preserved */
	movq	%rbp,TF_RBP(%rsp)	/* C preserved */
	movq	%r12,TF_R12(%rsp)	/* C preserved */
	movq	%r13,TF_R13(%rsp)	/* C preserved */
	movq	%r14,TF_R14(%rsp)	/* C preserved */
	movq	%r15,TF_R15(%rsp)	/* C preserved */
	movl	$TF_HASSEGS,TF_FLAGS(%rsp)
	FAKE_MCOUNT(TF_RIP(%rsp))
	movq	PCPU(CURTHREAD),%rdi
	movq	%rsp,TD_FRAME(%rdi)
	movl	TF_RFLAGS(%rsp),%esi
	andl	$PSL_T,%esi		/* pass trace-flag state to handler */
	call	amd64_syscall
1:	movq	PCPU(CURPCB),%rax
	/* Disable interrupts before testing PCB_FULL_IRET. */
	cli
	testl	$PCB_FULL_IRET,PCB_FLAGS(%rax)
	jnz	4f			/* full context restore requested */
	/* Check for and handle AST's on return to userland. */
	movq	PCPU(CURTHREAD),%rax
	testl	$TDF_ASTPENDING | TDF_NEEDRESCHED,TD_FLAGS(%rax)
	jne	3f
	call	handle_ibrs_exit
	/* Restore preserved registers. */
	MEXITCOUNT
	movq	TF_RDI(%rsp),%rdi	/* bonus; preserve arg 1 */
	movq	TF_RSI(%rsp),%rsi	/* bonus: preserve arg 2 */
	movq	TF_RDX(%rsp),%rdx	/* return value 2 */
	movq	TF_RAX(%rsp),%rax	/* return value 1 */
	movq	TF_RFLAGS(%rsp),%r11	/* original %rflags */
	movq	TF_RIP(%rsp),%rcx	/* original %rip */
	movq	TF_RSP(%rsp),%rsp	/* user stack pointer */
	xorl	%r8d,%r8d		/* zero the rest of GPRs */
	xorl	%r10d,%r10d
	cmpb	$0,pti			/* PTI enabled? */
	je	2f
	movq	PCPU(UCR3),%r9
	movq	%r9,%cr3		/* back to the user page table */
2:	xorl	%r9d,%r9d
	swapgs
	sysretq				/* return to userland */

3:	/* AST scheduled. */
	sti
	movq	%rsp,%rdi
	call	ast
	jmp	1b			/* re-check flags after the AST */

4:	/* Requested full context restore, use doreti for that. */
	MEXITCOUNT
	jmp	doreti
518
/*
 * Here for CYA insurance, in case a "syscall" instruction gets
 * issued from 32 bit compatibility mode. MSR_CSTAR has to point
 * to *something* if EFER_SCE is enabled.  We simply return to the
 * caller without performing any system call.
 */
IDTVEC(fast_syscall32)
	sysret
526
/*
 * DB# handler is very similar to NM#, because 'mov/pop %ss' delay
 * generation of exception until the next instruction is executed,
 * which might be a kernel entry.  So we must execute the handler
 * on IST stack and be ready for non-kernel GSBASE.
 */
IDTVEC(dbg)
	subq	$TF_RIP,%rsp		/* no error code for #DB */
	movl	$(T_TRCTRAP),TF_TRAPNO(%rsp)
	movq	$0,TF_ADDR(%rsp)
	movq	$0,TF_ERR(%rsp)
	movq	%rdi,TF_RDI(%rsp)
	movq	%rsi,TF_RSI(%rsp)
	movq	%rdx,TF_RDX(%rsp)
	movq	%rcx,TF_RCX(%rsp)
	movq	%r8,TF_R8(%rsp)
	movq	%r9,TF_R9(%rsp)
	movq	%rax,TF_RAX(%rsp)
	movq	%rbx,TF_RBX(%rsp)
	movq	%rbp,TF_RBP(%rsp)
	movq	%r10,TF_R10(%rsp)
	movq	%r11,TF_R11(%rsp)
	movq	%r12,TF_R12(%rsp)
	movq	%r13,TF_R13(%rsp)
	movq	%r14,TF_R14(%rsp)
	movq	%r15,TF_R15(%rsp)
	SAVE_SEGS
	movl	$TF_HASSEGS,TF_FLAGS(%rsp)
	cld
	testb	$SEL_RPL_MASK,TF_CS(%rsp)
	jnz	dbg_fromuserspace
	/*
	 * We've interrupted the kernel.  Preserve GS.base in %r12,
	 * %cr3 in %r13, and possibly lower half of MSR_IA32_SPEC_CTL in %r14d.
	 */
	movl	$MSR_GSBASE,%ecx
	rdmsr				/* current GS.base -> %edx:%eax */
	movq	%rax,%r12
	shlq	$32,%rdx
	orq	%rdx,%r12		/* %r12 = saved 64-bit GS.base */
	/* Retrieve and load the canonical value for GS.base. */
	movq	TF_SIZE(%rsp),%rdx	/* stored just above the IST frame */
	movl	%edx,%eax
	shrq	$32,%rdx
	wrmsr
	movq	%cr3,%r13		/* preserve %cr3 across trap() */
	movq	PCPU(KCR3),%rax
	cmpq	$~0,%rax		/* ~0 sentinel => PTI not active */
	je	1f
	movq	%rax,%cr3
1:	testl	$CPUID_STDEXT3_IBPB,cpu_stdext_feature3(%rip)
	je	2f
	movl	$MSR_IA32_SPEC_CTRL,%ecx
	rdmsr
	movl	%eax,%r14d		/* preserve SPEC_CTRL low half */
	call	handle_ibrs_entry
2:	FAKE_MCOUNT(TF_RIP(%rsp))
	movq	%rsp,%rdi		/* arg: trapframe pointer */
	call	trap
	MEXITCOUNT
	testl	$CPUID_STDEXT3_IBPB,cpu_stdext_feature3(%rip)
	je	3f
	movl	%r14d,%eax		/* restore saved SPEC_CTRL value */
	xorl	%edx,%edx
	movl	$MSR_IA32_SPEC_CTRL,%ecx
	wrmsr
	/*
	 * Put back the preserved MSR_GSBASE value.
	 */
3:	movl	$MSR_GSBASE,%ecx
	movq	%r12,%rdx
	movl	%edx,%eax
	shrq	$32,%rdx
	wrmsr
	movq	%r13,%cr3		/* restore the interrupted %cr3 */
	RESTORE_REGS
	addq	$TF_RIP,%rsp
	jmp	doreti_iret
dbg_fromuserspace:
	/*
	 * Switch to kernel GSBASE and kernel page table, and copy frame
	 * from the IST stack to the normal kernel stack, since trap()
	 * re-enables interrupts, and since we might trap on DB# while
	 * in trap().
	 */
	swapgs
	movq	PCPU(KCR3),%rax
	cmpq	$~0,%rax
	je	1f
	movq	%rax,%cr3
1:	movq	PCPU(RSP0),%rax
	movl	$TF_SIZE,%ecx
	subq	%rcx,%rax		/* destination = kstack top - frame */
	movq	%rax,%rdi
	movq	%rsp,%rsi
	rep;movsb			/* copy the trapframe */
	movq	%rax,%rsp		/* run on the regular kernel stack */
	call	handle_ibrs_entry
	movq	PCPU(CURPCB),%rdi
	orl	$PCB_FULL_IRET,PCB_FLAGS(%rdi)
	testb	$CPUID_STDEXT_FSGSBASE,cpu_stdext_feature(%rip)
	jz	3f
	cmpw	$KUF32SEL,TF_FS(%rsp)
	jne	2f
	rdfsbase %rax
	movq	%rax,PCB_FSBASE(%rdi)	/* preserve user fsbase in pcb */
2:	cmpw	$KUG32SEL,TF_GS(%rsp)
	jne	3f
	movl	$MSR_KGSBASE,%ecx
	rdmsr				/* user gsbase (post-swapgs) */
	shlq	$32,%rdx
	orq	%rdx,%rax
	movq	%rax,PCB_GSBASE(%rdi)	/* preserve user gsbase in pcb */
3:	jmp	calltrap
641
642 /*
643 * NMI handling is special.
644 *
645 * First, NMIs do not respect the state of the processor's RFLAGS.IF
646 * bit. The NMI handler may be entered at any time, including when
647 * the processor is in a critical section with RFLAGS.IF == 0.
648 * The processor's GS.base value could be invalid on entry to the
649 * handler.
650 *
651 * Second, the processor treats NMIs specially, blocking further NMIs
652 * until an 'iretq' instruction is executed. We thus need to execute
653 * the NMI handler with interrupts disabled, to prevent a nested interrupt
654 * from executing an 'iretq' instruction and inadvertently taking the
655 * processor out of NMI mode.
656 *
657 * Third, the NMI handler runs on its own stack (tss_ist2). The canonical
658 * GS.base value for the processor is stored just above the bottom of its
659 * NMI stack. For NMIs taken from kernel mode, the current value in
660 * the processor's GS.base is saved at entry to C-preserved register %r12,
661 * the canonical value for GS.base is then loaded into the processor, and
662 * the saved value is restored at exit time. For NMIs taken from user mode,
663 * the cheaper 'SWAPGS' instructions are used for swapping GS.base.
664 */
665
/*
 * NMI entry (see the large comment above).  %ebx records whether the
 * NMI came from userland (1) or kernel (0) and steers the exit path.
 * For kernel-mode NMIs, GS.base is preserved in %r12, %cr3 in %r13
 * and the low half of MSR_IA32_SPEC_CTRL in %r14d, all restored on
 * the way out.
 */
IDTVEC(nmi)
	subq	$TF_RIP,%rsp		/* no error code for NMI */
	movl	$(T_NMI),TF_TRAPNO(%rsp)
	movq	$0,TF_ADDR(%rsp)
	movq	$0,TF_ERR(%rsp)
	movq	%rdi,TF_RDI(%rsp)
	movq	%rsi,TF_RSI(%rsp)
	movq	%rdx,TF_RDX(%rsp)
	movq	%rcx,TF_RCX(%rsp)
	movq	%r8,TF_R8(%rsp)
	movq	%r9,TF_R9(%rsp)
	movq	%rax,TF_RAX(%rsp)
	movq	%rbx,TF_RBX(%rsp)
	movq	%rbp,TF_RBP(%rsp)
	movq	%r10,TF_R10(%rsp)
	movq	%r11,TF_R11(%rsp)
	movq	%r12,TF_R12(%rsp)
	movq	%r13,TF_R13(%rsp)
	movq	%r14,TF_R14(%rsp)
	movq	%r15,TF_R15(%rsp)
	SAVE_SEGS
	movl	$TF_HASSEGS,TF_FLAGS(%rsp)
	cld
	xorl	%ebx,%ebx		/* %ebx = 0: NMI from kernel */
	testb	$SEL_RPL_MASK,TF_CS(%rsp)
	jnz	nmi_fromuserspace
	/*
	 * We've interrupted the kernel.  Preserve GS.base in %r12,
	 * %cr3 in %r13, and possibly lower half of MSR_IA32_SPEC_CTL in %r14d.
	 */
	movl	$MSR_GSBASE,%ecx
	rdmsr				/* current GS.base -> %edx:%eax */
	movq	%rax,%r12
	shlq	$32,%rdx
	orq	%rdx,%r12		/* %r12 = saved 64-bit GS.base */
	/* Retrieve and load the canonical value for GS.base. */
	movq	TF_SIZE(%rsp),%rdx	/* stored just above the NMI frame */
	movl	%edx,%eax
	shrq	$32,%rdx
	wrmsr
	movq	%cr3,%r13		/* preserve %cr3 across trap() */
	movq	PCPU(KCR3),%rax
	cmpq	$~0,%rax		/* ~0 sentinel => PTI not active */
	je	1f
	movq	%rax,%cr3
1:	testl	$CPUID_STDEXT3_IBPB,cpu_stdext_feature3(%rip)
	je	nmi_calltrap
	movl	$MSR_IA32_SPEC_CTRL,%ecx
	rdmsr
	movl	%eax,%r14d		/* preserve SPEC_CTRL low half */
	call	handle_ibrs_entry
	jmp	nmi_calltrap
nmi_fromuserspace:
	incl	%ebx			/* %ebx = 1: NMI from userland */
	swapgs
	movq	%cr3,%r13
	movq	PCPU(KCR3),%rax
	cmpq	$~0,%rax
	je	1f
	movq	%rax,%cr3
1:	call	handle_ibrs_entry
	movq	PCPU(CURPCB),%rdi
	testq	%rdi,%rdi		/* pcb may be absent early in boot */
	jz	3f
	orl	$PCB_FULL_IRET,PCB_FLAGS(%rdi)
	testb	$CPUID_STDEXT_FSGSBASE,cpu_stdext_feature(%rip)
	jz	3f
	cmpw	$KUF32SEL,TF_FS(%rsp)
	jne	2f
	rdfsbase %rax
	movq	%rax,PCB_FSBASE(%rdi)	/* preserve user fsbase in pcb */
2:	cmpw	$KUG32SEL,TF_GS(%rsp)
	jne	3f
	movl	$MSR_KGSBASE,%ecx
	rdmsr				/* user gsbase (post-swapgs) */
	shlq	$32,%rdx
	orq	%rdx,%rax
	movq	%rax,PCB_GSBASE(%rdi)	/* preserve user gsbase in pcb */
3:
	/* Note: this label is also used by ddb and gdb: */
nmi_calltrap:
	FAKE_MCOUNT(TF_RIP(%rsp))
	movq	%rsp,%rdi		/* arg: trapframe pointer */
	call	trap
	MEXITCOUNT
#ifdef HWPMC_HOOKS
	/*
	 * Capture a userspace callchain if needed.
	 *
	 * - Check if the current trap was from user mode.
	 * - Check if the current thread is valid.
	 * - Check if the thread requires a user call chain to be
	 *   captured.
	 *
	 * We are still in NMI mode at this point.
	 */
	testl	%ebx,%ebx
	jz	nocallchain		/* not from userspace */
	movq	PCPU(CURTHREAD),%rax
	orq	%rax,%rax		/* curthread present? */
	jz	nocallchain
	/*
	 * Move execution to the regular kernel stack, because we
	 * committed to return through doreti.
	 */
	movq	%rsp,%rsi		/* source stack pointer */
	movq	$TF_SIZE,%rcx
	movq	PCPU(RSP0),%rdx
	subq	%rcx,%rdx
	movq	%rdx,%rdi		/* destination stack pointer */
	shrq	$3,%rcx			/* trap frame size in long words */
	cld
	rep
	movsq				/* copy trapframe */
	movq	%rdx,%rsp		/* we are on the regular kstack */

	testl	$TDP_CALLCHAIN,TD_PFLAGS(%rax) /* flagged for capture? */
	jz	nocallchain
	/*
	 * A user callchain is to be captured, so:
	 * - Take the processor out of "NMI" mode by faking an "iret",
	 *   to allow for nested NMI interrupts.
	 * - Enable interrupts, so that copyin() can work.
	 */
	movl	%ss,%eax
	pushq	%rax			/* tf_ss */
	pushq	%rdx			/* tf_rsp (on kernel stack) */
	pushfq				/* tf_rflags */
	movl	%cs,%eax
	pushq	%rax			/* tf_cs */
	pushq	$outofnmi		/* tf_rip */
	iretq				/* "return" to the next instruction */
outofnmi:
	/*
	 * At this point the processor has exited NMI mode and is running
	 * with interrupts turned off on the normal kernel stack.
	 *
	 * If a pending NMI gets recognized at or after this point, it
	 * will cause a kernel callchain to be traced.
	 *
	 * We turn interrupts back on, and call the user callchain capture hook.
	 */
	movq	pmc_hook,%rax
	orq	%rax,%rax		/* hook registered? */
	jz	nocallchain
	movq	PCPU(CURTHREAD),%rdi	/* thread */
	movq	$PMC_FN_USER_CALLCHAIN,%rsi /* command */
	movq	%rsp,%rdx		/* frame */
	sti
	call	*%rax
	cli
nocallchain:
#endif
	testl	%ebx,%ebx		/* %ebx == 0 => return to userland */
	jnz	doreti_exit
	/*
	 * Restore speculation control MSR, if preserved.
	 */
	testl	$CPUID_STDEXT3_IBPB,cpu_stdext_feature3(%rip)
	je	1f
	movl	%r14d,%eax
	xorl	%edx,%edx
	movl	$MSR_IA32_SPEC_CTRL,%ecx
	wrmsr
	/*
	 * Put back the preserved MSR_GSBASE value.
	 */
1:	movl	$MSR_GSBASE,%ecx
	movq	%r12,%rdx
	movl	%edx,%eax
	shrq	$32,%rdx
	wrmsr
	movq	%r13,%cr3		/* restore the interrupted %cr3 */
	RESTORE_REGS
	addq	$TF_RIP,%rsp
	jmp	doreti_iret
842
843 /*
844 * MC# handling is similar to NMI.
845 *
846 * As with NMIs, machine check exceptions do not respect RFLAGS.IF and
847 * can occur at any time with a GS.base value that does not correspond
848 * to the privilege level in CS.
849 *
850 * Machine checks are not unblocked by iretq, but it is best to run
851 * the handler with interrupts disabled since the exception may have
852 * interrupted a critical section.
853 *
854 * The MC# handler runs on its own stack (tss_ist3). The canonical
855 * GS.base value for the processor is stored just above the bottom of
856 * its MC# stack. For exceptions taken from kernel mode, the current
857 * value in the processor's GS.base is saved at entry to C-preserved
858 * register %r12, the canonical value for GS.base is then loaded into
859 * the processor, and the saved value is restored at exit time. For
860 * exceptions taken from user mode, the cheaper 'SWAPGS' instructions
861 * are used for swapping GS.base.
862 */
863
/*
 * MC# entry (see the comment above).  Structure mirrors the NMI
 * handler: %ebx flags a user-mode origin, %r12/%r13/%r14d preserve
 * GS.base, %cr3 and SPEC_CTRL for kernel-mode entries; mca_intr()
 * is called instead of trap().
 */
IDTVEC(mchk)
	subq	$TF_RIP,%rsp		/* no error code for #MC */
	movl	$(T_MCHK),TF_TRAPNO(%rsp)
	movq	$0,TF_ADDR(%rsp)
	movq	$0,TF_ERR(%rsp)
	movq	%rdi,TF_RDI(%rsp)
	movq	%rsi,TF_RSI(%rsp)
	movq	%rdx,TF_RDX(%rsp)
	movq	%rcx,TF_RCX(%rsp)
	movq	%r8,TF_R8(%rsp)
	movq	%r9,TF_R9(%rsp)
	movq	%rax,TF_RAX(%rsp)
	movq	%rbx,TF_RBX(%rsp)
	movq	%rbp,TF_RBP(%rsp)
	movq	%r10,TF_R10(%rsp)
	movq	%r11,TF_R11(%rsp)
	movq	%r12,TF_R12(%rsp)
	movq	%r13,TF_R13(%rsp)
	movq	%r14,TF_R14(%rsp)
	movq	%r15,TF_R15(%rsp)
	SAVE_SEGS
	movl	$TF_HASSEGS,TF_FLAGS(%rsp)
	cld
	xorl	%ebx,%ebx		/* %ebx = 0: #MC from kernel */
	testb	$SEL_RPL_MASK,TF_CS(%rsp)
	jnz	mchk_fromuserspace
	/*
	 * We've interrupted the kernel.  Preserve GS.base in %r12,
	 * %cr3 in %r13, and possibly lower half of MSR_IA32_SPEC_CTL in %r14d.
	 */
	movl	$MSR_GSBASE,%ecx
	rdmsr				/* current GS.base -> %edx:%eax */
	movq	%rax,%r12
	shlq	$32,%rdx
	orq	%rdx,%r12		/* %r12 = saved 64-bit GS.base */
	/* Retrieve and load the canonical value for GS.base. */
	movq	TF_SIZE(%rsp),%rdx	/* stored just above the MC# frame */
	movl	%edx,%eax
	shrq	$32,%rdx
	wrmsr
	movq	%cr3,%r13		/* preserve %cr3 across mca_intr() */
	movq	PCPU(KCR3),%rax
	cmpq	$~0,%rax		/* ~0 sentinel => PTI not active */
	je	1f
	movq	%rax,%cr3
1:	testl	$CPUID_STDEXT3_IBPB,cpu_stdext_feature3(%rip)
	je	mchk_calltrap
	movl	$MSR_IA32_SPEC_CTRL,%ecx
	rdmsr
	movl	%eax,%r14d		/* preserve SPEC_CTRL low half */
	call	handle_ibrs_entry
	jmp	mchk_calltrap
mchk_fromuserspace:
	incl	%ebx			/* %ebx = 1: #MC from userland */
	swapgs
	movq	%cr3,%r13
	movq	PCPU(KCR3),%rax
	cmpq	$~0,%rax
	je	1f
	movq	%rax,%cr3
1:	call	handle_ibrs_entry
	/* Note: this label is also used by ddb and gdb: */
mchk_calltrap:
	FAKE_MCOUNT(TF_RIP(%rsp))
	movq	%rsp,%rdi		/* arg: trapframe pointer */
	call	mca_intr
	MEXITCOUNT
	testl	%ebx,%ebx		/* %ebx == 0 => return to userland */
	jnz	doreti_exit
	/*
	 * Restore speculation control MSR, if preserved.
	 */
	testl	$CPUID_STDEXT3_IBPB,cpu_stdext_feature3(%rip)
	je	1f
	movl	%r14d,%eax
	xorl	%edx,%edx
	movl	$MSR_IA32_SPEC_CTRL,%ecx
	wrmsr
	/*
	 * Put back the preserved MSR_GSBASE value.
	 */
1:	movl	$MSR_GSBASE,%ecx
	movq	%r12,%rdx
	movl	%edx,%eax
	shrq	$32,%rdx
	wrmsr
	movq	%r13,%cr3		/* restore the interrupted %cr3 */
	RESTORE_REGS
	addq	$TF_RIP,%rsp
	jmp	doreti_iret
954
/*
 * First-time entry point for a newly forked thread, reached via
 * cpu_switch().  Marshals the callout function (%r12), its argument
 * (%rbx) and the trapframe pointer (%rsp) into the C calling
 * convention and hands off to fork_exit(), then finishes the return
 * to usermode through doreti.
 */
ENTRY(fork_trampoline)
	movq	%rsp,%rdx		/* arg 3: trapframe pointer */
	movq	%rbx,%rsi		/* arg 2: callout argument */
	movq	%r12,%rdi		/* arg 1: callout function */
	call	fork_exit
	MEXITCOUNT
	jmp	doreti			/* deliver pending ASTs, iret */
962
963 /*
964 * To efficiently implement classification of trap and interrupt handlers
965 * for profiling, there must be only trap handlers between the labels btrap
966 * and bintr, and only interrupt handlers between the labels bintr and
967 * eintr. This is implemented (partly) by including files that contain
968 * some of the handlers. Before including the files, set up a normal asm
 * environment so that the included files don't need to know that they are
970 * included.
971 */
972
973 #ifdef COMPAT_FREEBSD32
974 .data
975 .p2align 4
976 .text
977 SUPERALIGN_TEXT
978
979 #include <amd64/ia32/ia32_exception.S>
980 #endif
981
982 .data
983 .p2align 4
984 .text
985 SUPERALIGN_TEXT
986 MCOUNT_LABEL(bintr)
987
988 #include <amd64/amd64/apic_vector.S>
989
990 #ifdef DEV_ATPIC
991 .data
992 .p2align 4
993 .text
994 SUPERALIGN_TEXT
995
996 #include <amd64/amd64/atpic_vector.S>
997 #endif
998
999 .text
1000 MCOUNT_LABEL(eintr)
1001
1002 /*
1003 * void doreti(struct trapframe)
1004 *
1005 * Handle return from interrupts, traps and syscalls.
1006 */
1007 .text
1008 SUPERALIGN_TEXT
1009 .type doreti,@function
1010 .globl doreti
1011 doreti:
1012 FAKE_MCOUNT($bintr) /* init "from" bintr -> doreti */
1013 /*
1014 * Check if ASTs can be handled now.
1015 */
1016 testb $SEL_RPL_MASK,TF_CS(%rsp) /* are we returning to user mode? */
1017 jz doreti_exit /* can't handle ASTs now if not */
1018
1019 doreti_ast:
1020 /*
1021 * Check for ASTs atomically with returning. Disabling CPU
1022 * interrupts provides sufficient locking even in the SMP case,
1023 * since we will be informed of any new ASTs by an IPI.
1024 */
1025 cli
1026 movq PCPU(CURTHREAD),%rax
1027 testl $TDF_ASTPENDING | TDF_NEEDRESCHED,TD_FLAGS(%rax)
1028 je doreti_exit
1029 sti
1030 movq %rsp,%rdi /* pass a pointer to the trapframe */
1031 call ast
1032 jmp doreti_ast
1033
1034 /*
1035 * doreti_exit: pop registers, iret.
1036 *
1037 * The segment register pop is a special case, since it may
1038 * fault if (for example) a sigreturn specifies bad segment
1039 * registers. The fault is handled in trap.c.
1040 */
1041 doreti_exit:
1042 MEXITCOUNT
1043 movq PCPU(CURPCB),%r8
1044
1045 /*
1046 * Do not reload segment registers for kernel.
1047 * Since we do not reload segments registers with sane
1048 * values on kernel entry, descriptors referenced by
1049 * segments registers might be not valid. This is fatal
1050 * for user mode, but is not a problem for the kernel.
1051 */
1052 testb $SEL_RPL_MASK,TF_CS(%rsp)
1053 jz ld_regs
1054 testl $PCB_FULL_IRET,PCB_FLAGS(%r8)
1055 jz ld_regs
1056 andl $~PCB_FULL_IRET,PCB_FLAGS(%r8)
1057 testl $TF_HASSEGS,TF_FLAGS(%rsp)
1058 je set_segs
1059
do_segs:
	/*
	 * Restore %fs and fsbase.  Writing %fs may fault on a bad
	 * selector; trap.c routes that to fs_load_fault (via the ld_fs
	 * global label).  Loading %fs also resets the hidden FS base,
	 * so for the standard 32-bit TLS selector we rewrite
	 * MSR_FSBASE from the pcb afterwards.
	 */
	movw	TF_FS(%rsp),%ax
	.globl	ld_fs
ld_fs:
	movw	%ax,%fs
	cmpw	$KUF32SEL,%ax		/* default 32-bit user %fs? */
	jne	1f			/* no: keep whatever base it set */
	movl	$MSR_FSBASE,%ecx
	movl	PCB_FSBASE(%r8),%eax	/* edx:eax = pcb fsbase */
	movl	PCB_FSBASE+4(%r8),%edx
	.globl	ld_fsbase
ld_fsbase:
	wrmsr
1:
	/*
	 * Restore %gs and gsbase.  The %gs write clobbers the hidden
	 * GS base, which currently holds the kernel per-CPU pointer,
	 * so the whole sequence runs with interrupts disabled
	 * (pushfq/cli ... popfq).  If the ld_gs write faults,
	 * gs_load_fault pops the saved flags instead of us.
	 * %r12-%r15 are free scratch here: RESTORE_REGS below reloads
	 * them from the trapframe.
	 */
	movw	TF_GS(%rsp),%si
	pushfq
	cli
	movl	$MSR_GSBASE,%ecx
	/* Save current kernel %gs base into %r12d:%r13d */
	rdmsr
	movl	%eax,%r12d
	movl	%edx,%r13d
	.globl	ld_gs
ld_gs:
	movw	%si,%gs
	/* Save user %gs base into %r14d:%r15d */
	rdmsr
	movl	%eax,%r14d
	movl	%edx,%r15d
	/* Restore kernel %gs base */
	movl	%r12d,%eax
	movl	%r13d,%edx
	wrmsr
	popfq
	/*
	 * Restore user %gs base, either from PCB if used for TLS, or
	 * from the previously saved msr read.  MSR_KGSBASE becomes the
	 * active GS base after the final swapgs on the way out.
	 */
	movl	$MSR_KGSBASE,%ecx
	cmpw	$KUG32SEL,%si		/* default 32-bit user %gs (TLS)? */
	jne	1f
	movl	PCB_GSBASE(%r8),%eax	/* edx:eax = pcb gsbase */
	movl	PCB_GSBASE+4(%r8),%edx
	jmp	ld_gsbase
1:
	movl	%r14d,%eax		/* edx:eax = base read back above */
	movl	%r15d,%edx
	.globl	ld_gsbase
ld_gsbase:
	wrmsr	/* May trap if non-canonical, but only for TLS. */
	/* %es/%ds loads may also fault; handled via es/ds_load_fault. */
	.globl	ld_es
ld_es:
	movw	TF_ES(%rsp),%es
	.globl	ld_ds
ld_ds:
	movw	TF_DS(%rsp),%ds
ld_regs:
	RESTORE_REGS
	testb	$SEL_RPL_MASK,TF_CS(%rsp) /* Did we come from kernel? */
	jz	2f			/* keep running with kernel GS.base */
	cli				/* no interrupts while on PTI stack */
	call	handle_ibrs_exit_rs
	cmpb	$0,pti			/* page-table isolation enabled? */
	je	1f			/* no: plain swapgs + iretq */
	/*
	 * PTI return trampoline: the iretq must execute from a stack
	 * that is mapped in the user page table.  Copy the hardware
	 * frame (rip/cs/rflags/rsp/ss) plus scratch %rax/%rdx onto the
	 * per-CPU PTI stack, switch %rsp there, load the user %cr3,
	 * then pop the scratch registers and iretq.
	 */
	pushq	%rdx			/* stash %rdx; %rdx becomes pointer */
	movq	PCPU(PRVSPACE),%rdx
	addq	$PC_PTI_STACK+PC_PTI_STACK_SZ*8-PTI_SIZE,%rdx /* frame top */
	movq	%rax,PTI_RAX(%rdx)
	popq	%rax			/* recover stashed %rdx value */
	movq	%rax,PTI_RDX(%rdx)
	movq	TF_RIP(%rsp),%rax	/* copy iret frame field by field */
	movq	%rax,PTI_RIP(%rdx)
	movq	TF_CS(%rsp),%rax
	movq	%rax,PTI_CS(%rdx)
	movq	TF_RFLAGS(%rsp),%rax
	movq	%rax,PTI_RFLAGS(%rdx)
	movq	TF_RSP(%rsp),%rax
	movq	%rax,PTI_RSP(%rdx)
	movq	TF_SS(%rsp),%rax
	movq	%rax,PTI_SS(%rdx)
	movq	PCPU(UCR3),%rax		/* user page table root */
	swapgs				/* back to user GS base */
	movq	%rdx,%rsp		/* run on the PTI stack */
	movq	%rax,%cr3		/* kernel mappings gone after this */
	popq	%rdx			/* restore scratch regs from PTI frame */
	popq	%rax
	addq	$8,%rsp			/* skip error-code slot to iret frame */
	jmp	doreti_iret
1:	swapgs				/* non-PTI: just flip to user GS base */
2:	addq	$TF_RIP,%rsp		/* discard frame below the iret words */
	.globl	doreti_iret
doreti_iret:
	iretq				/* may fault; see doreti_iret_fault */
1155
/*
 * The trapframe carries no segment registers (TF_HASSEGS clear):
 * seed it with the default user data/TLS selectors, then rejoin
 * the normal segment-restore path.
 */
set_segs:
	movw	$KUDSEL,%ax
	movw	%ax,TF_DS(%rsp)
	movw	%ax,TF_ES(%rsp)
	movw	$KUF32SEL,TF_FS(%rsp)
	movw	$KUG32SEL,TF_GS(%rsp)
	jmp	do_segs
1163
1164 /*
1165 * doreti_iret_fault. Alternative return code for
1166 * the case where we get a fault in the doreti_exit code
1167 * above. trap() (amd64/amd64/trap.c) catches this specific
1168 * case, sends the process a signal and continues in the
1169 * corresponding place in the code below.
1170 */
1171 ALIGN_TEXT
1172 .globl doreti_iret_fault
1173 doreti_iret_fault:
1174 subq $TF_RIP,%rsp /* space including tf_err, tf_trapno */
1175 movq %rax,TF_RAX(%rsp)
1176 movq %rdx,TF_RDX(%rsp)
1177 movq %rcx,TF_RCX(%rsp)
1178 call handle_ibrs_entry
1179 testb $SEL_RPL_MASK,TF_CS(%rsp)
1180 jz 1f
1181 sti
1182 1:
1183 SAVE_SEGS
1184 movl $TF_HASSEGS,TF_FLAGS(%rsp)
1185 movq %rdi,TF_RDI(%rsp)
1186 movq %rsi,TF_RSI(%rsp)
1187 movq %r8,TF_R8(%rsp)
1188 movq %r9,TF_R9(%rsp)
1189 movq %rbx,TF_RBX(%rsp)
1190 movq %rbp,TF_RBP(%rsp)
1191 movq %r10,TF_R10(%rsp)
1192 movq %r11,TF_R11(%rsp)
1193 movq %r12,TF_R12(%rsp)
1194 movq %r13,TF_R13(%rsp)
1195 movq %r14,TF_R14(%rsp)
1196 movq %r15,TF_R15(%rsp)
1197 movl $T_PROTFLT,TF_TRAPNO(%rsp)
1198 movq $0,TF_ERR(%rsp) /* XXX should be the error code */
1199 movq $0,TF_ADDR(%rsp)
1200 FAKE_MCOUNT(TF_RIP(%rsp))
1201 jmp calltrap
1202
/*
 * Fault recovery for the "movw TF_DS(%rsp),%ds" at ld_ds: report a
 * protection fault via trap(), then retry the return with the safe
 * default user data selector in the frame.
 *
 * NOTE(review): this handler gates sti on the CS RPL of the frame,
 * while the sibling es/fs/gs/base handlers test PSL_I in TF_RFLAGS —
 * confirm whether this divergence is intentional.
 */
	ALIGN_TEXT
	.globl	ds_load_fault
ds_load_fault:
	movl	$T_PROTFLT,TF_TRAPNO(%rsp)
	testb	$SEL_RPL_MASK,TF_CS(%rsp)
	jz	1f
	sti
1:
	movq	%rsp,%rdi		/* trap(trapframe) */
	call	trap
	movw	$KUDSEL,TF_DS(%rsp)	/* substitute a known-good %ds */
	jmp	doreti			/* retry the whole exit path */
1215
/*
 * Fault recovery for the "movw TF_ES(%rsp),%es" at ld_es: report a
 * protection fault via trap(), then retry the return with the safe
 * default user data selector in the frame.
 */
	ALIGN_TEXT
	.globl	es_load_fault
es_load_fault:
	movl	$T_PROTFLT,TF_TRAPNO(%rsp)
	testl	$PSL_I,TF_RFLAGS(%rsp)	/* re-enable only if frame had IF set */
	jz	1f
	sti
1:
	movq	%rsp,%rdi		/* trap(trapframe) */
	call	trap
	movw	$KUDSEL,TF_ES(%rsp)	/* substitute a known-good %es */
	jmp	doreti			/* retry the whole exit path */
1228
/*
 * Fault recovery for the "movw %ax,%fs" at ld_fs: report a protection
 * fault via trap(), then retry with the default 32-bit user %fs
 * selector in the frame.
 */
	ALIGN_TEXT
	.globl	fs_load_fault
fs_load_fault:
	testl	$PSL_I,TF_RFLAGS(%rsp)	/* re-enable only if frame had IF set */
	jz	1f
	sti
1:
	movl	$T_PROTFLT,TF_TRAPNO(%rsp)
	movq	%rsp,%rdi		/* trap(trapframe) */
	call	trap
	movw	$KUF32SEL,TF_FS(%rsp)	/* substitute a known-good %fs */
	jmp	doreti			/* retry the whole exit path */
1241
/*
 * Fault recovery for the "movw %si,%gs" at ld_gs.  That instruction
 * sits between the pushfq/cli and popfq in do_segs, so first pop the
 * saved flags to rebalance the stack, then report a protection fault
 * via trap() and retry with the default 32-bit user %gs selector.
 */
	ALIGN_TEXT
	.globl	gs_load_fault
gs_load_fault:
	popfq				/* discard flags pushed in do_segs */
	movl	$T_PROTFLT,TF_TRAPNO(%rsp)
	testl	$PSL_I,TF_RFLAGS(%rsp)	/* re-enable only if frame had IF set */
	jz	1f
	sti
1:
	movq	%rsp,%rdi		/* trap(trapframe) */
	call	trap
	movw	$KUG32SEL,TF_GS(%rsp)	/* substitute a known-good %gs */
	jmp	doreti			/* retry the whole exit path */
1255
/*
 * Fault recovery for the wrmsr at ld_fsbase (e.g. a non-canonical
 * base from sigreturn): report a protection fault via trap(), zero
 * the pcb's fsbase so the retried wrmsr loads a canonical value,
 * then retry the return.
 */
	ALIGN_TEXT
	.globl	fsbase_load_fault
fsbase_load_fault:
	movl	$T_PROTFLT,TF_TRAPNO(%rsp)
	testl	$PSL_I,TF_RFLAGS(%rsp)	/* re-enable only if frame had IF set */
	jz	1f
	sti
1:
	movq	%rsp,%rdi		/* trap(trapframe) */
	call	trap
	movq	PCPU(CURTHREAD),%r8
	movq	TD_PCB(%r8),%r8		/* %r8 = curthread->td_pcb */
	movq	$0,PCB_FSBASE(%r8)	/* 0 is always canonical */
	jmp	doreti			/* retry the whole exit path */
1270
/*
 * Fault recovery for the wrmsr at ld_gsbase (non-canonical TLS base):
 * report a protection fault via trap(), zero the pcb's gsbase so the
 * retried wrmsr loads a canonical value, then retry the return.
 */
	ALIGN_TEXT
	.globl	gsbase_load_fault
gsbase_load_fault:
	movl	$T_PROTFLT,TF_TRAPNO(%rsp)
	testl	$PSL_I,TF_RFLAGS(%rsp)	/* re-enable only if frame had IF set */
	jz	1f
	sti
1:
	movq	%rsp,%rdi		/* trap(trapframe) */
	call	trap
	movq	PCPU(CURTHREAD),%r8
	movq	TD_PCB(%r8),%r8		/* %r8 = curthread->td_pcb */
	movq	$0,PCB_GSBASE(%r8)	/* 0 is always canonical */
	jmp	doreti			/* retry the whole exit path */
1285
#ifdef HWPMC_HOOKS
/* Marker label: end of the exception/interrupt text range for hwpmc(4). */
ENTRY(end_exceptions)
#endif