1 /*-
2 * Copyright (c) 1989, 1990 William F. Jolitz.
3 * Copyright (c) 1990 The Regents of the University of California.
4 * All rights reserved.
5 *
6 * Redistribution and use in source and binary forms, with or without
7 * modification, are permitted provided that the following conditions
8 * are met:
9 * 1. Redistributions of source code must retain the above copyright
10 * notice, this list of conditions and the following disclaimer.
11 * 2. Redistributions in binary form must reproduce the above copyright
12 * notice, this list of conditions and the following disclaimer in the
13 * documentation and/or other materials provided with the distribution.
14 * 3. All advertising materials mentioning features or use of this software
15 * must display the following acknowledgement:
16 * This product includes software developed by the University of
17 * California, Berkeley and its contributors.
18 * 4. Neither the name of the University nor the names of its contributors
19 * may be used to endorse or promote products derived from this software
20 * without specific prior written permission.
21 *
22 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
23 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
24 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
25 * ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
26 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
27 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
28 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
29 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
30 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
31 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
32 * SUCH DAMAGE.
33 *
34 * $FreeBSD: releng/5.1/sys/amd64/amd64/exception.S 114987 2003-05-14 04:10:49Z peter $
35 */
36
37 #include <machine/asmacros.h>
38 #include <sys/mutex.h>
39 #include <machine/psl.h>
40 #include <machine/trap.h>
41
42 #include "assym.s"
43
/*
 * Low two bits of a segment selector: the Requested Privilege Level.
 * A non-zero RPL in the saved trapframe %cs means the trap came from
 * user mode (ring 3), so GS.base still holds the user value and a
 * swapgs is required before touching PCPU data.
 */
44 #define SEL_RPL_MASK 0x0003
45
46 .text
47
48 /*****************************************************************************/
49 /* Trap handling */
50 /*****************************************************************************/
51 /*
52 * Trap and fault vector routines.
53 *
54 * All traps are 'interrupt gates', SDT_SYSIGT. An interrupt gate pushes
55 * state on the stack but also disables interrupts. This is important for
56 * us for the use of the swapgs instruction. We cannot be interrupted
57 * until the GS.base value is correct. For most traps, we automatically
58 * then enable interrupts if the interrupted context had them enabled.
59 * This is equivalent to the i386 port's use of SDT_SYS386TGT.
60 *
61 * The cpu will push a certain amount of state onto the kernel stack for
62 * the current process. See amd64/include/frame.h.
63 * The current RFLAGS (status register, which includes
64 * the interrupt disable state prior to the trap), the code segment register,
65 * and the return instruction pointer are pushed by the cpu. The cpu
66 * will also push an 'error' code for certain traps. We push a dummy
67 * error code for those traps where the cpu doesn't in order to maintain
68 * a consistent frame. We also push a contrived 'trap number'.
69 *
70 * The cpu does not push the general registers, we must do that, and we
71 * must restore them prior to calling 'iret'. The cpu adjusts the %cs and
72 * %ss segment registers, but does not mess with %ds, %es, or %fs. Thus we
73 * must load them with appropriate values for supervisor mode operation.
74 */
/*
 * IDTVEC(name) emits an aligned, global, function-typed entry point
 * named X<name> (__CONCAT pastes the 'X' prefix), suitable for
 * installation as an IDT gate target; e.g. IDTVEC(div) defines Xdiv.
 */
75 #define IDTVEC(name) ALIGN_TEXT; .globl __CONCAT(X,name); \
76 .type __CONCAT(X,name),@function; __CONCAT(X,name):
77
/* Profiling anchor labels used by FAKE_MCOUNT/MEXITCOUNT under GPROF. */
78 MCOUNT_LABEL(user)
79 MCOUNT_LABEL(btrap)
80
/*
 * TRAP_NOEN(a): build the software half of the trapframe for traps
 * that must run with interrupts left disabled (debug/breakpoint, used
 * by kernel debuggers).  The cpu pushed no error code for these, so
 * we reserve the whole software area (subq $TF_RIP) and hand-fill
 * tf_trapno with 'a' plus dummy tf_addr and tf_err, then join the
 * common no-enable path.
 */
81 /* Traps that we leave interrupts disabled for.. */
82 #define TRAP_NOEN(a) \
83 subq $TF_RIP,%rsp; \
84 movq $(a),TF_TRAPNO(%rsp) ; \
85 movq $0,TF_ADDR(%rsp) ; \
86 movq $0,TF_ERR(%rsp) ; \
87 jmp alltraps_noen
88 IDTVEC(dbg)
89 TRAP_NOEN(T_TRCTRAP)
90 IDTVEC(bpt)
91 TRAP_NOEN(T_BPTFLT)
92
/*
 * TRAP(a): same frame construction as TRAP_NOEN (cpu pushed no error
 * code; we supply dummy tf_err/tf_addr and the trap number 'a'), but
 * dispatch through alltraps, which will conditionally re-enable
 * interrupts if the interrupted context had them on.
 */
93 /* Regular traps; The cpu does not supply tf_err for these. */
94 #define TRAP(a) \
95 subq $TF_RIP,%rsp; \
96 movq $(a),TF_TRAPNO(%rsp) ; \
97 movq $0,TF_ADDR(%rsp) ; \
98 movq $0,TF_ERR(%rsp) ; \
99 jmp alltraps
100 IDTVEC(div)
101 TRAP(T_DIVIDE)
/*
 * NOTE(review): NMI shares this generic path; an NMI landing in a
 * kernel window where GS.base handling is in flight (e.g. around
 * swapgs in the syscall path) would be misjudged by the RPL check in
 * alltraps -- confirm this is an accepted limitation of this vintage.
 */
102 IDTVEC(nmi)
103 TRAP(T_NMI)
104 IDTVEC(ofl)
105 TRAP(T_OFLOW)
106 IDTVEC(bnd)
107 TRAP(T_BOUND)
108 IDTVEC(ill)
109 TRAP(T_PRIVINFLT)
110 IDTVEC(dna)
111 TRAP(T_DNA)
112 IDTVEC(fpusegm)
113 TRAP(T_FPOPFLT)
114 IDTVEC(mchk)
115 TRAP(T_MCHK)
116 IDTVEC(rsvd)
117 TRAP(T_RESERVED)
118 IDTVEC(fpu)
119 TRAP(T_ARITHTRAP)
120 IDTVEC(xmm)
121 TRAP(T_XMMFLT)
122
/*
 * TRAP_ERR(a): for exceptions where the cpu already pushed an error
 * code, so only the area below tf_err (subq $TF_ERR) is reserved and
 * tf_err is NOT overwritten.  Dispatches through alltraps_noen, i.e.
 * interrupts remain disabled until trap() runs.
 */
123 /* This group of traps have tf_err already pushed by the cpu */
124 #define TRAP_ERR(a) \
125 subq $TF_ERR,%rsp; \
126 movq $(a),TF_TRAPNO(%rsp) ; \
127 movq $0,TF_ADDR(%rsp) ; \
128 jmp alltraps_noen
129 IDTVEC(tss)
130 TRAP_ERR(T_TSSFLT)
131 IDTVEC(missing)
132 TRAP_ERR(T_SEGNPFLT)
133 IDTVEC(stk)
134 TRAP_ERR(T_STKFLT)
135 IDTVEC(prot)
136 TRAP_ERR(T_PROTFLT)
137 IDTVEC(align)
138 TRAP_ERR(T_ALIGNFLT)
139
140 /*
141 * alltraps entry point. Use swapgs if this is the first time in the
142 * kernel from userland. Reenable interrupts if they were enabled
143 * before the trap. This approximates SDT_SYS386TGT on the i386 port.
144 */
145
146 SUPERALIGN_TEXT
147 .globl alltraps
148 .type alltraps,@function
149 alltraps:
150 testb $SEL_RPL_MASK,TF_CS(%rsp) /* Did we come from kernel? */
151 jz alltraps_testi /* already running with kernel GS.base */
152 swapgs
/* Re-enable interrupts only if the interrupted context had them on. */
153 alltraps_testi:
154 testl $PSL_I,TF_RFLAGS(%rsp)
155 jz alltraps_pushregs
156 sti
/* Save the general registers the cpu did not push into the trapframe. */
157 alltraps_pushregs:
158 movq %rdi,TF_RDI(%rsp)
159 alltraps_pushregs_no_rdi: /* entered from Xpage, which saved %rdi itself */
160 movq %rsi,TF_RSI(%rsp)
161 movq %rdx,TF_RDX(%rsp)
162 movq %rcx,TF_RCX(%rsp)
163 movq %r8,TF_R8(%rsp)
164 movq %r9,TF_R9(%rsp)
165 movq %rax,TF_RAX(%rsp)
166 movq %rbx,TF_RBX(%rsp)
167 movq %rbp,TF_RBP(%rsp)
168 movq %r10,TF_R10(%rsp)
169 movq %r11,TF_R11(%rsp)
170 movq %r12,TF_R12(%rsp)
171 movq %r13,TF_R13(%rsp)
172 movq %r14,TF_R14(%rsp)
173 movq %r15,TF_R15(%rsp)
174 alltraps_with_regs_pushed:
175 FAKE_MCOUNT(13*4(%rsp)) /* NOTE(review): 13*4 offset looks inherited from i386 -- confirm against the amd64 frame layout */
176 calltrap:
177 FAKE_MCOUNT(btrap) /* init "from" btrap -> calltrap */
178 call trap
179 MEXITCOUNT
180 jmp doreti /* Handle any pending ASTs */
181
182 /*
183 * alltraps_noen entry point. Unlike alltraps above, we want to
184 * leave the interrupts disabled. This corresponds to
185 * SDT_SYS386IGT on the i386 port.
186 */
187 SUPERALIGN_TEXT
188 .globl alltraps_noen
189 .type alltraps_noen,@function
190 alltraps_noen:
191 testb $SEL_RPL_MASK,TF_CS(%rsp) /* Did we come from kernel? */
192 jz alltraps_pushregs /* already running with kernel GS.base */
193 swapgs
194 jmp alltraps_pushregs /* save registers; interrupts stay disabled */
195
/*
 * Double fault handler.  The cpu pushed an error code, so only the
 * remainder of the frame is reserved.  Fix up GS.base if we arrived
 * from user mode, report through dblfault_handler(), and if the
 * handler ever returns, halt this cpu forever.
 */
196 IDTVEC(dblfault)
197 subq $TF_ERR,%rsp
198 movq $T_DOUBLEFLT,TF_TRAPNO(%rsp)
199 testb $SEL_RPL_MASK,TF_CS(%rsp) /* Did we come from kernel? */
200 jz 1f /* already running with kernel GS.base */
201 swapgs
202 1: call dblfault_handler
203 2: hlt /* unrecoverable: spin in hlt forever */
204 jmp 2b
205
/*
 * Page fault.  The cpu pushed an error code.  %cr2 (the faulting
 * address) must be captured into tf_addr BEFORE interrupts are
 * re-enabled, since a nested fault would overwrite it.  %rdi is
 * saved up front to serve as scratch for the %cr2 copy, hence the
 * join at alltraps_pushregs_no_rdi.
 */
206 IDTVEC(page)
207 subq $TF_ERR,%rsp
208 movq $T_PAGEFLT,TF_TRAPNO(%rsp)
209 testb $SEL_RPL_MASK,TF_CS(%rsp) /* Did we come from kernel? */
210 jz 1f /* already running with kernel GS.base */
211 swapgs
212 1: movq %rdi,TF_RDI(%rsp) /* free up a GP register */
213 movq %cr2,%rdi /* preserve %cr2 before .. */
214 movq %rdi,TF_ADDR(%rsp) /* enabling interrupts. */
215 testl $PSL_I,TF_RFLAGS(%rsp)
216 jz alltraps_pushregs_no_rdi
217 sti
218 jmp alltraps_pushregs_no_rdi
219
220 /*
221 * Fast syscall entry point. We enter here with just our new %cs/%ss set,
222 * and the new privilege level. We are still running on the old user stack
223 * pointer. We have to juggle a few things around to find our stack etc.
224 * swapgs gives us access to our PCPU space only.
225 *
226 * The syscall instruction left the return %rip in %rcx and the saved
227 * %rflags in %r11; interrupts are off until we have a kernel stack.
228 */
226 IDTVEC(fast_syscall)
227 swapgs /* get kernel GS.base so PCPU() works */
228 movq %rsp,PCPU(SCRATCH_RSP) /* stash the user stack pointer */
229 movq common_tss+COMMON_TSS_RSP0,%rsp /* switch to the kernel stack */
230 /* Now emulate a trapframe. Ugh. */
231 subq $TF_SIZE,%rsp
232 /* defer TF_RSP till we have a spare register */
233 movq %r11,TF_RFLAGS(%rsp) /* %r11 = rflags saved by syscall */
234 movq %rcx,TF_RIP(%rsp) /* %rcx original value is in %r10 */
235 movq PCPU(SCRATCH_RSP),%r11 /* %r11 already saved */
236 movq %r11,TF_RSP(%rsp) /* user stack pointer */
237 sti /* safe now: frame is on the kernel stack */
238 movq $KUDSEL,TF_SS(%rsp)
239 movq $KUCSEL,TF_CS(%rsp)
240 movq $2,TF_ERR(%rsp) /* NOTE(review): constant 2 looks like a syscall frame marker -- confirm its consumer */
241 movq %rdi,TF_RDI(%rsp) /* arg 1 */
242 movq %rsi,TF_RSI(%rsp) /* arg 2 */
243 movq %rdx,TF_RDX(%rsp) /* arg 3 */
244 movq %r10,TF_RCX(%rsp) /* arg 4 */
245 movq %r8,TF_R8(%rsp) /* arg 5 */
246 movq %r9,TF_R9(%rsp) /* arg 6 */
247 movq %rax,TF_RAX(%rsp) /* syscall number */
248 movq %rbx,TF_RBX(%rsp) /* C preserved */
249 movq %rbp,TF_RBP(%rsp) /* C preserved */
250 movq %r12,TF_R12(%rsp) /* C preserved */
251 movq %r13,TF_R13(%rsp) /* C preserved */
252 movq %r14,TF_R14(%rsp) /* C preserved */
253 movq %r15,TF_R15(%rsp) /* C preserved */
254 call syscall
/* If the syscall requested a full context restore, take the slow path. */
255 movq PCPU(CURPCB),%rax
256 testq $PCB_FULLCTX,PCB_FLAGS(%rax)
257 jne 3f
258 1: /* Check for and handle AST's on return to userland */
259 cli
260 movq PCPU(CURTHREAD),%rax
261 testl $TDF_ASTPENDING | TDF_NEEDRESCHED,TD_FLAGS(%rax)
262 je 2f
263 sti
264 movq %rsp, %rdi
265 call ast
266 jmp 1b /* recheck: ast() may raise new ASTs */
267 2: /* restore preserved registers */
268 movq TF_RDI(%rsp),%rdi /* bonus; preserve arg 1 */
269 movq TF_RSI(%rsp),%rsi /* bonus: preserve arg 2 */
270 movq TF_RDX(%rsp),%rdx /* return value 2 */
271 movq TF_RAX(%rsp),%rax /* return value 1 */
272 movq TF_RBX(%rsp),%rbx /* C preserved */
273 movq TF_RBP(%rsp),%rbp /* C preserved */
274 movq TF_R12(%rsp),%r12 /* C preserved */
275 movq TF_R13(%rsp),%r13 /* C preserved */
276 movq TF_R14(%rsp),%r14 /* C preserved */
277 movq TF_R15(%rsp),%r15 /* C preserved */
278 movq TF_RFLAGS(%rsp),%r11 /* original %rflags */
279 movq TF_RIP(%rsp),%rcx /* original %rip */
280 movq TF_RSP(%rsp),%r9 /* user stack pointer */
281 movq %r9,%rsp /* original %rsp */
282 swapgs /* back to user GS.base */
283 sysretq /* %rcx -> %rip, %r11 -> %rflags */
284 3: /* Requested full context restore, use doreti for that */
285 andq $~PCB_FULLCTX,PCB_FLAGS(%rax)
286 jmp doreti
287
288 /*
289 * Here for CYA insurance, in case a "syscall" instruction gets
290 * issued from 32 bit compatibility mode. MSR_CSTAR has to point
291 * to *something* if EFER_SCE is enabled.
292 */
293 IDTVEC(fast_syscall32)
294 sysret /* return immediately; 32-bit syscall is not serviced here */
295
/*
 * fork_trampoline: first code executed in a newly forked context when
 * it is switched to.  %r12 and %rbx are callee-saved registers that
 * survived the context switch and carry the callout function and its
 * argument (NOTE(review): presumed set up by the fork/cpu_switch code
 * elsewhere -- confirm against vm_machdep.c).  fork_exit() is called
 * with (function, arg1, trapframe) in the SysV argument registers,
 * then doreti finishes the return to the new context.
 */
296 ENTRY(fork_trampoline)
297 movq %r12, %rdi /* function */
298 movq %rbx, %rsi /* arg1 */
299 movq %rsp, %rdx /* trapframe pointer */
300 call fork_exit
301 MEXITCOUNT
302 jmp doreti /* Handle any ASTs */
303
304
305 /*
306 * Include what was once config+isa-dependent code.
307 * XXX it should be in a stand-alone file. It's still icu-dependent and
308 * belongs in i386/isa.
309 */
310 #include "amd64/isa/vector.S"
311
312 .data
313 ALIGN_DATA
314
315 /*
316 * void doreti(struct trapframe)
317 *
318 * Handle return from interrupts, traps and syscalls.
319 */
320 .text
321 SUPERALIGN_TEXT
322 .globl doreti
323 .type doreti,@function
324 doreti:
325 FAKE_MCOUNT(bintr) /* init "from" bintr -> doreti */
326 /*
327 * Check if ASTs can be handled now.
328 */
329 testb $SEL_RPL_MASK,TF_CS(%rsp) /* are we returning to user mode? */
330 jz doreti_exit /* can't handle ASTs now if not */
331
332 doreti_ast:
333 /*
334 * Check for ASTs atomically with returning. Disabling CPU
335 * interrupts provides sufficient locking even in the SMP case,
336 * since we will be informed of any new ASTs by an IPI.
337 */
338 cli
339 movq PCPU(CURTHREAD),%rax
340 testl $TDF_ASTPENDING | TDF_NEEDRESCHED,TD_FLAGS(%rax)
341 je doreti_exit
342 sti /* re-enable interrupts while the AST runs */
343 movq %rsp, %rdi /* pass a pointer to the trapframe */
344 call ast
345 jmp doreti_ast /* recheck: ast() may have raised new ASTs */
346
347 /*
348 * doreti_exit: pop registers, iret.
349 *
350 * The segment register pop is a special case, since it may
351 * fault if (for example) a sigreturn specifies bad segment
352 * registers. The fault is handled in trap.c.
353 */
354 doreti_exit:
355 MEXITCOUNT
356
/* Reload every general register from the trapframe. */
357 movq TF_RDI(%rsp),%rdi
358 movq TF_RSI(%rsp),%rsi
359 movq TF_RDX(%rsp),%rdx
360 movq TF_RCX(%rsp),%rcx
361 movq TF_R8(%rsp),%r8
362 movq TF_R9(%rsp),%r9
363 movq TF_RAX(%rsp),%rax
364 movq TF_RBX(%rsp),%rbx
365 movq TF_RBP(%rsp),%rbp
366 movq TF_R10(%rsp),%r10
367 movq TF_R11(%rsp),%r11
368 movq TF_R12(%rsp),%r12
369 movq TF_R13(%rsp),%r13
370 movq TF_R14(%rsp),%r14
371 movq TF_R15(%rsp),%r15
372 testb $SEL_RPL_MASK,TF_CS(%rsp) /* Did we come from kernel? */
373 jz 1f /* keep running with kernel GS.base */
374 cli /* no interrupts between swapgs and iretq */
375 swapgs /* restore the user GS.base */
376 1: addq $TF_RIP,%rsp /* skip over tf_err, tf_trapno */
377 .globl doreti_iret
378 doreti_iret:
379 iretq /* may fault; recovered at doreti_iret_fault */
380
381 /*
382 * doreti_iret_fault and friends. Alternative return code for
383 * the case where we get a fault in the doreti_exit code
384 * above. trap() (i386/i386/trap.c) catches this specific
385 * case, sends the process a signal and continues in the
386 * corresponding place in the code below.
387 *
388 * We rebuild a full trapframe by hand (the registers still hold the
389 * values doreti_exit just restored) and re-enter the generic trap
390 * path with T_PROTFLT.
391 */
388 ALIGN_TEXT
389 .globl doreti_iret_fault
390 doreti_iret_fault:
391 subq $TF_RIP,%rsp /* space including tf_err, tf_trapno */
392 testb $SEL_RPL_MASK,TF_CS(%rsp) /* Did we come from kernel? */
393 jz 1f /* already running with kernel GS.base */
394 swapgs
395 1: testl $PSL_I,TF_RFLAGS(%rsp)
396 jz 2f
397 sti
/* Save the general registers back into the rebuilt trapframe. */
398 2: movq %rdi,TF_RDI(%rsp)
399 movq %rsi,TF_RSI(%rsp)
400 movq %rdx,TF_RDX(%rsp)
401 movq %rcx,TF_RCX(%rsp)
402 movq %r8,TF_R8(%rsp)
403 movq %r9,TF_R9(%rsp)
404 movq %rax,TF_RAX(%rsp)
405 movq %rbx,TF_RBX(%rsp)
406 movq %rbp,TF_RBP(%rsp)
407 movq %r10,TF_R10(%rsp)
408 movq %r11,TF_R11(%rsp)
409 movq %r12,TF_R12(%rsp)
410 movq %r13,TF_R13(%rsp)
411 movq %r14,TF_R14(%rsp)
412 movq %r15,TF_R15(%rsp)
413 movq $T_PROTFLT,TF_TRAPNO(%rsp)
414 movq $0,TF_ERR(%rsp) /* XXX should be the error code */
415 jmp alltraps_with_regs_pushed
416
417 #include "amd64/isa/icu_ipl.S"
Cache object: d3f3c187886936734a5578ca51b0df76
|