1 /*-
2 * Copyright (c) 1989, 1990 William F. Jolitz.
3 * Copyright (c) 1990 The Regents of the University of California.
4 * All rights reserved.
5 *
6 * Redistribution and use in source and binary forms, with or without
7 * modification, are permitted provided that the following conditions
8 * are met:
9 * 1. Redistributions of source code must retain the above copyright
10 * notice, this list of conditions and the following disclaimer.
11 * 2. Redistributions in binary form must reproduce the above copyright
12 * notice, this list of conditions and the following disclaimer in the
13 * documentation and/or other materials provided with the distribution.
14 * 3. All advertising materials mentioning features or use of this software
15 * must display the following acknowledgement:
16 * This product includes software developed by the University of
17 * California, Berkeley and its contributors.
18 * 4. Neither the name of the University nor the names of its contributors
19 * may be used to endorse or promote products derived from this software
20 * without specific prior written permission.
21 *
22 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
23 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
24 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
25 * ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
26 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
27 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
28 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
29 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
30 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
31 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
32 * SUCH DAMAGE.
33 *
34 * $FreeBSD: releng/5.2/sys/amd64/amd64/exception.S 122849 2003-11-17 08:58:16Z peter $
35 */
36
37 #include <machine/asmacros.h>
38 #include <machine/psl.h>
39 #include <machine/trap.h>
40
41 #include "assym.s"
42
43 .text
44
45 /*****************************************************************************/
46 /* Trap handling */
47 /*****************************************************************************/
48 /*
49 * Trap and fault vector routines.
50 *
51 * All traps are 'interrupt gates', SDT_SYSIGT. An interrupt gate pushes
52 * state on the stack but also disables interrupts. This is important for
53 * us for the use of the swapgs instruction. We cannot be interrupted
54 * until the GS.base value is correct. For most traps, we automatically
55 * then enable interrupts if the interrupted context had them enabled.
56 * This is equivalent to the i386 port's use of SDT_SYS386TGT.
57 *
58 * The cpu will push a certain amount of state onto the kernel stack for
59 * the current process. See amd64/include/frame.h.
60 * This includes the current RFLAGS (status register, which includes
61 * the interrupt disable state prior to the trap), the code segment register,
62 * and the return instruction pointer are pushed by the cpu. The cpu
63 * will also push an 'error' code for certain traps. We push a dummy
64 * error code for those traps where the cpu doesn't in order to maintain
65 * a consistent frame. We also push a contrived 'trap number'.
66 *
67 * The cpu does not push the general registers, we must do that, and we
68 * must restore them prior to calling 'iret'. The cpu adjusts the %cs and
69 * %ss segment registers, but does not mess with %ds, %es, or %fs. Thus we
70 * must load them with appropriate values for supervisor mode operation.
71 */
72
73 MCOUNT_LABEL(user)
74 MCOUNT_LABEL(btrap)
75
/*
 * Traps that we leave interrupts disabled for..
 *
 * The cpu pushes no error code for these vectors, so reserve the whole
 * remainder of the trapframe (everything below tf_rip) by dropping
 * %rsp, record the contrived trap number, zero the tf_addr/tf_err
 * slots, and enter the common path (alltraps_noen) that does NOT
 * conditionally re-enable interrupts.
 */
#define TRAP_NOEN(a) \
	subq $TF_RIP,%rsp; \
	movq $(a),TF_TRAPNO(%rsp) ; \
	movq $0,TF_ADDR(%rsp) ; \
	movq $0,TF_ERR(%rsp) ; \
	jmp alltraps_noen
/* Debug/trace trap. */
IDTVEC(dbg)
	TRAP_NOEN(T_TRCTRAP)
/* Breakpoint. */
IDTVEC(bpt)
	TRAP_NOEN(T_BPTFLT)
87
/*
 * Regular traps; The cpu does not supply tf_err for these.
 *
 * Same frame setup as TRAP_NOEN above, but the common entry point
 * (alltraps) re-enables interrupts when the interrupted context had
 * them enabled.
 */
#define TRAP(a) \
	subq $TF_RIP,%rsp; \
	movq $(a),TF_TRAPNO(%rsp) ; \
	movq $0,TF_ADDR(%rsp) ; \
	movq $0,TF_ERR(%rsp) ; \
	jmp alltraps
/* Integer divide fault. */
IDTVEC(div)
	TRAP(T_DIVIDE)
/*
 * Non-maskable interrupt.
 * NOTE(review): an NMI routed through alltraps relies on the
 * conditional swapgs there; if the NMI lands inside another entry
 * sequence while GS.base is being switched, the user/kernel test can
 * pick the wrong base.  Later kernels use a dedicated NMI entry path;
 * confirm whether that is a concern for this branch.
 */
IDTVEC(nmi)
	TRAP(T_NMI)
/* Overflow trap. */
IDTVEC(ofl)
	TRAP(T_OFLOW)
/* Bound range exceeded. */
IDTVEC(bnd)
	TRAP(T_BOUND)
/* Privileged or invalid instruction. */
IDTVEC(ill)
	TRAP(T_PRIVINFLT)
/* Device not available (FPU). */
IDTVEC(dna)
	TRAP(T_DNA)
/* FP operand fetch fault. */
IDTVEC(fpusegm)
	TRAP(T_FPOPFLT)
/* Machine check. */
IDTVEC(mchk)
	TRAP(T_MCHK)
/* Reserved/unexpected vector. */
IDTVEC(rsvd)
	TRAP(T_RESERVED)
/* x87 floating point error. */
IDTVEC(fpu)
	TRAP(T_ARITHTRAP)
/* SIMD floating point exception. */
IDTVEC(xmm)
	TRAP(T_XMMFLT)
117
/*
 * This group of traps have tf_err already pushed by the cpu, so only
 * the slots below tf_err need to be reserved.  tf_addr is zeroed;
 * only the page fault handler (below) records a real fault address.
 *
 * These are ordinary faults, so enter through alltraps, which
 * re-enables interrupts when the interrupted context had them
 * enabled.  (The interrupts-disabled path, alltraps_noen, is reserved
 * for the debug/breakpoint traps above -- jumping there from here
 * would leave interrupts off all the way into trap(), contradicting
 * the "enable interrupts for most traps" policy in the file header.)
 */
#define TRAP_ERR(a) \
	subq $TF_ERR,%rsp; \
	movq $(a),TF_TRAPNO(%rsp) ; \
	movq $0,TF_ADDR(%rsp) ; \
	jmp alltraps
/* Invalid TSS. */
IDTVEC(tss)
	TRAP_ERR(T_TSSFLT)
/* Segment not present. */
IDTVEC(missing)
	TRAP_ERR(T_SEGNPFLT)
/* Stack segment fault. */
IDTVEC(stk)
	TRAP_ERR(T_STKFLT)
/* General protection fault. */
IDTVEC(prot)
	TRAP_ERR(T_PROTFLT)
/* Alignment check. */
IDTVEC(align)
	TRAP_ERR(T_ALIGNFLT)
134
/*
 * alltraps entry point.  Use swapgs if this is the first time in the
 * kernel from userland.  Reenable interrupts if they were enabled
 * before the trap.  This approximates SDT_SYS386TGT on the i386 port.
 *
 * On entry the vector stub has already filled in tf_trapno, tf_addr
 * and tf_err, and the cpu has pushed the hardware part of the frame
 * (tf_rip .. tf_ss).  Interrupts are still off (interrupt gate).
 * Steps:
 *   1. swapgs iff we trapped from user mode, so GS.base points at
 *      kernel per-cpu data.
 *   2. re-enable interrupts iff the interrupted context had PSL_I set.
 *   3. spill the general-purpose registers into the trapframe.
 *   4. call trap() and exit through doreti.
 */

	SUPERALIGN_TEXT
	.globl	alltraps
	.type	alltraps,@function
alltraps:
	testb	$SEL_RPL_MASK,TF_CS(%rsp) /* Did we come from kernel? */
	jz	alltraps_testi		/* already running with kernel GS.base */
	swapgs
alltraps_testi:
	testl	$PSL_I,TF_RFLAGS(%rsp)	/* interrupts enabled before trap? */
	jz	alltraps_pushregs
	sti
alltraps_pushregs:
	movq	%rdi,TF_RDI(%rsp)
alltraps_pushregs_no_rdi:		/* entered from the page fault stub,
					   which has already saved %rdi */
	movq	%rsi,TF_RSI(%rsp)
	movq	%rdx,TF_RDX(%rsp)
	movq	%rcx,TF_RCX(%rsp)
	movq	%r8,TF_R8(%rsp)
	movq	%r9,TF_R9(%rsp)
	movq	%rax,TF_RAX(%rsp)
	movq	%rbx,TF_RBX(%rsp)
	movq	%rbp,TF_RBP(%rsp)
	movq	%r10,TF_R10(%rsp)
	movq	%r11,TF_R11(%rsp)
	movq	%r12,TF_R12(%rsp)
	movq	%r13,TF_R13(%rsp)
	movq	%r14,TF_R14(%rsp)
	movq	%r15,TF_R15(%rsp)
alltraps_with_regs_pushed:		/* restart point for doreti_iret_fault */
calltrap:
	/*
	 * The complete trapframe sits at %rsp, so after the call pushes
	 * the return address it lines up as trap()'s on-stack argument
	 * (presumably trap() takes a struct trapframe by value -- confirm
	 * against trap.c).
	 */
	call	trap
	jmp	doreti			/* Handle any pending ASTs */
173
/*
 * alltraps_noen entry point.  Unlike alltraps above, we want to
 * leave the interrupts disabled.  This corresponds to
 * SDT_SYS386IGT on the i386 port.
 *
 * Only the conditional swapgs is performed here; the PSL_I test is
 * skipped, so trap() runs with interrupts still off.
 */
	SUPERALIGN_TEXT
	.globl	alltraps_noen
	.type	alltraps_noen,@function
alltraps_noen:
	testb	$SEL_RPL_MASK,TF_CS(%rsp) /* Did we come from kernel? */
	jz	alltraps_pushregs	/* already running with kernel GS.base */
	swapgs
	jmp	alltraps_pushregs
187
/*
 * Double fault handler.  The cpu pushes an error code for #DF, so
 * only the slots below tf_err are reserved.  A double fault is not
 * returned from: dblfault_handler() presumably panics (confirm), and
 * the hlt loop below is a backstop in case it ever returns.
 */
IDTVEC(dblfault)
	subq	$TF_ERR,%rsp		/* cpu already pushed tf_err */
	movq	$T_DOUBLEFLT,TF_TRAPNO(%rsp)
	testb	$SEL_RPL_MASK,TF_CS(%rsp) /* Did we come from kernel? */
	jz	1f			/* already running with kernel GS.base */
	swapgs
1:	call	dblfault_handler
2:	hlt				/* spin forever; #DF is fatal */
	jmp	2b
197
/*
 * Page fault handler.  The cpu supplies tf_err (the page fault error
 * code); the faulting address is in %cr2 and must be captured before
 * interrupts are re-enabled, since a fault taken in the interim would
 * overwrite it.  %rdi is saved early so it can serve as scratch for
 * the %cr2 read; we then join the common register spill path at the
 * point that skips %rdi.
 */
IDTVEC(page)
	subq	$TF_ERR,%rsp		/* cpu already pushed tf_err */
	movq	$T_PAGEFLT,TF_TRAPNO(%rsp)
	testb	$SEL_RPL_MASK,TF_CS(%rsp) /* Did we come from kernel? */
	jz	1f			/* already running with kernel GS.base */
	swapgs
1:	movq	%rdi,TF_RDI(%rsp)	/* free up a GP register */
	movq	%cr2,%rdi		/* preserve %cr2 before .. */
	movq	%rdi,TF_ADDR(%rsp)	/* enabling interrupts. */
	testl	$PSL_I,TF_RFLAGS(%rsp)	/* re-enable iff previously enabled */
	jz	alltraps_pushregs_no_rdi
	sti
	jmp	alltraps_pushregs_no_rdi
211
/*
 * Fast syscall entry point.  We enter here with just our new %cs/%ss set,
 * and the new privilege level.  We are still running on the old user stack
 * pointer.  We have to juggle a few things around to find our stack etc.
 * swapgs gives us access to our PCPU space only.
 *
 * The syscall instruction leaves the return %rip in %rcx and the old
 * %rflags in %r11; both are captured into the emulated trapframe.
 * Interrupts are disabled on entry (note the sti below -- presumably
 * via the syscall flags mask; confirm) and must stay off until the
 * user stack pointer has been read back out of the per-cpu scratch
 * slot, NOTE(review): presumably so a context switch cannot clobber
 * PCPU(SCRATCH_RSP); confirm.
 */
IDTVEC(fast_syscall)
	swapgs					/* get kernel GS.base / PCPU */
	movq	%rsp,PCPU(SCRATCH_RSP)		/* stash user stack pointer */
	movq	PCPU(RSP0),%rsp			/* switch to kernel stack */
	/* Now emulate a trapframe. Make the 8 byte alignment odd for call. */
	subq	$TF_SIZE,%rsp
	/* defer TF_RSP till we have a spare register */
	movq	%r11,TF_RFLAGS(%rsp)		/* syscall put rflags in %r11 */
	movq	%rcx,TF_RIP(%rsp)	/* %rcx original value is in %r10 */
	movq	PCPU(SCRATCH_RSP),%r11	/* %r11 already saved */
	movq	%r11,TF_RSP(%rsp)	/* user stack pointer */
	sti				/* scratch slot consumed; safe now */
	movq	$KUDSEL,TF_SS(%rsp)
	movq	$KUCSEL,TF_CS(%rsp)
	movq	$2,TF_ERR(%rsp)		/* presumably sizeof("syscall") for
					   restart logic -- confirm */
	movq	%rdi,TF_RDI(%rsp)	/* arg 1 */
	movq	%rsi,TF_RSI(%rsp)	/* arg 2 */
	movq	%rdx,TF_RDX(%rsp)	/* arg 3 */
	movq	%r10,TF_RCX(%rsp)	/* arg 4 */
	movq	%r8,TF_R8(%rsp)		/* arg 5 */
	movq	%r9,TF_R9(%rsp)		/* arg 6 */
	movq	%rax,TF_RAX(%rsp)	/* syscall number */
	movq	%rbx,TF_RBX(%rsp)	/* C preserved */
	movq	%rbp,TF_RBP(%rsp)	/* C preserved */
	movq	%r12,TF_R12(%rsp)	/* C preserved */
	movq	%r13,TF_R13(%rsp)	/* C preserved */
	movq	%r14,TF_R14(%rsp)	/* C preserved */
	movq	%r15,TF_R15(%rsp)	/* C preserved */
	call	syscall			/* frame at %rsp is the argument */
	movq	PCPU(CURPCB),%rax
	testq	$PCB_FULLCTX,PCB_FLAGS(%rax)	/* full restore requested? */
	jne	3f
1:	/* Check for and handle AST's on return to userland */
	cli				/* sample AST flags atomically */
	movq	PCPU(CURTHREAD),%rax
	testl	$TDF_ASTPENDING | TDF_NEEDRESCHED,TD_FLAGS(%rax)
	je	2f
	sti
	movq	%rsp, %rdi		/* pass trapframe to ast() */
	call	ast
	jmp	1b			/* re-check after ast() */
2:	/* restore preserved registers */
	/* Interrupts remain off from the cli above until sysretq, so the
	   AST check cannot be invalidated on the way out. */
	movq	TF_RDI(%rsp),%rdi	/* bonus; preserve arg 1 */
	movq	TF_RSI(%rsp),%rsi	/* bonus: preserve arg 2 */
	movq	TF_RDX(%rsp),%rdx	/* return value 2 */
	movq	TF_RAX(%rsp),%rax	/* return value 1 */
	movq	TF_RBX(%rsp),%rbx	/* C preserved */
	movq	TF_RBP(%rsp),%rbp	/* C preserved */
	movq	TF_R12(%rsp),%r12	/* C preserved */
	movq	TF_R13(%rsp),%r13	/* C preserved */
	movq	TF_R14(%rsp),%r14	/* C preserved */
	movq	TF_R15(%rsp),%r15	/* C preserved */
	movq	TF_RFLAGS(%rsp),%r11	/* original %rflags */
	movq	TF_RIP(%rsp),%rcx	/* original %rip */
	movq	TF_RSP(%rsp),%r9	/* user stack pointer */
	movq	%r9,%rsp		/* original %rsp */
	swapgs				/* back to user GS.base */
	sysretq				/* rip <- %rcx, rflags <- %r11 */
3:	/* Requested full context restore, use doreti for that */
	andq	$~PCB_FULLCTX,PCB_FLAGS(%rax)
	jmp	doreti
279
/*
 * Here for CYA insurance, in case a "syscall" instruction gets
 * issued from 32 bit compatibility mode.  MSR_CSTAR has to point
 * to *something* if EFER_SCE is enabled.
 *
 * This simply bounces straight back to userland with sysret; no
 * 32-bit syscall service is provided here.
 */
IDTVEC(fast_syscall32)
	sysret
287
/*
 * fork_trampoline: first code executed by a newly created thread.
 * %r12 and %rbx carry the callout function and its argument
 * (NOTE(review): inferred from the register use below; presumably set
 * up by the MD fork/cpu_set_fork_handler code -- confirm).  Calls
 * fork_exit(function, arg, frame) and then returns to user mode via
 * doreti using the trapframe on this stack.
 */
ENTRY(fork_trampoline)
	movq	%r12, %rdi		/* function */
	movq	%rbx, %rsi		/* arg1 */
	movq	%rsp, %rdx		/* trapframe pointer */
	call	fork_exit
	jmp	doreti			/* Handle any ASTs */
294
295 .data
296 ALIGN_DATA
297
/*
 * void doreti(struct trapframe)
 *
 * Handle return from interrupts, traps and syscalls.  Delivers any
 * pending ASTs when returning to user mode, restores the trapframe
 * registers, and performs the conditional swapgs before iretq.
 */
	.text
	SUPERALIGN_TEXT
	.globl	doreti
	.type	doreti,@function
doreti:
	/*
	 * Check if ASTs can be handled now.  ASTs are only delivered on
	 * the way back to user mode; a kernel-mode frame exits directly.
	 */
	testb	$SEL_RPL_MASK,TF_CS(%rsp) /* are we returning to user mode? */
	jz	doreti_exit		/* can't handle ASTs now if not */

doreti_ast:
	/*
	 * Check for ASTs atomically with returning.  Disabling CPU
	 * interrupts provides sufficient locking even in the SMP case,
	 * since we will be informed of any new ASTs by an IPI.
	 */
	cli
	movq	PCPU(CURTHREAD),%rax
	testl	$TDF_ASTPENDING | TDF_NEEDRESCHED,TD_FLAGS(%rax)
	je	doreti_exit
	sti
	movq	%rsp, %rdi	/* pass a pointer to the trapframe */
	call	ast
	jmp	doreti_ast	/* re-check; ast() may raise new flags */

	/*
	 * doreti_exit:	pop registers, iret.
	 *
	 *	The segment register pop is a special case, since it may
	 *	fault if (for example) a sigreturn specifies bad segment
	 *	registers.  The fault is handled in trap.c.
	 *
	 *	NOTE(review): the paragraph above is inherited from i386;
	 *	no segment registers are popped on amd64 -- the faulting
	 *	instruction of interest here is the iretq itself (see
	 *	doreti_iret / doreti_iret_fault below).
	 */
doreti_exit:
	movq	TF_RDI(%rsp),%rdi
	movq	TF_RSI(%rsp),%rsi
	movq	TF_RDX(%rsp),%rdx
	movq	TF_RCX(%rsp),%rcx
	movq	TF_R8(%rsp),%r8
	movq	TF_R9(%rsp),%r9
	movq	TF_RAX(%rsp),%rax
	movq	TF_RBX(%rsp),%rbx
	movq	TF_RBP(%rsp),%rbp
	movq	TF_R10(%rsp),%r10
	movq	TF_R11(%rsp),%r11
	movq	TF_R12(%rsp),%r12
	movq	TF_R13(%rsp),%r13
	movq	TF_R14(%rsp),%r14
	movq	TF_R15(%rsp),%r15
	testb	$SEL_RPL_MASK,TF_CS(%rsp) /* Did we come from kernel? */
	jz	1f			/* keep running with kernel GS.base */
	cli				/* no interrupts between swapgs/iretq */
	swapgs
1:	addq	$TF_RIP,%rsp		/* skip over tf_err, tf_trapno */
	.globl	doreti_iret
doreti_iret:				/* trap.c matches this pc to detect a
					   faulting iretq (see comment below) */
	iretq
360
/*
 * doreti_iret_fault and friends.  Alternative return code for
 * the case where we get a fault in the doreti_exit code
 * above.  trap() (i386/i386/trap.c -- NOTE(review): this path looks
 * stale for amd64; presumably amd64/amd64/trap.c, confirm) catches
 * this specific case, sends the process a signal and continues in the
 * corresponding place in the code below.
 *
 * By this point doreti_exit has already restored all the user
 * registers, so the live register values are the ones the faulting
 * iretq was about to resume with.  We rebuild a full trapframe around
 * the new hardware frame and re-enter the common trap path presenting
 * the fault as a protection fault, so trap() can post a signal.
 */
	ALIGN_TEXT
	.globl	doreti_iret_fault
doreti_iret_fault:
	subq	$TF_RIP,%rsp		/* space including tf_err, tf_trapno */
	testb	$SEL_RPL_MASK,TF_CS(%rsp) /* Did we come from kernel? */
	jz	1f			/* already running with kernel GS.base */
	swapgs
1:	testl	$PSL_I,TF_RFLAGS(%rsp)	/* re-enable iff previously enabled */
	jz	2f
	sti
2:	movq	%rdi,TF_RDI(%rsp)
	movq	%rsi,TF_RSI(%rsp)
	movq	%rdx,TF_RDX(%rsp)
	movq	%rcx,TF_RCX(%rsp)
	movq	%r8,TF_R8(%rsp)
	movq	%r9,TF_R9(%rsp)
	movq	%rax,TF_RAX(%rsp)
	movq	%rbx,TF_RBX(%rsp)
	movq	%rbp,TF_RBP(%rsp)
	movq	%r10,TF_R10(%rsp)
	movq	%r11,TF_R11(%rsp)
	movq	%r12,TF_R12(%rsp)
	movq	%r13,TF_R13(%rsp)
	movq	%r14,TF_R14(%rsp)
	movq	%r15,TF_R15(%rsp)
	movq	$T_PROTFLT,TF_TRAPNO(%rsp)	/* present it as #GP */
	movq	$0,TF_ERR(%rsp)	/* XXX should be the error code */
	jmp	alltraps_with_regs_pushed