1 /*-
2 * Copyright (c) 1989, 1990 William F. Jolitz.
3 * Copyright (c) 1990 The Regents of the University of California.
4 * All rights reserved.
5 *
6 * Redistribution and use in source and binary forms, with or without
7 * modification, are permitted provided that the following conditions
8 * are met:
9 * 1. Redistributions of source code must retain the above copyright
10 * notice, this list of conditions and the following disclaimer.
11 * 2. Redistributions in binary form must reproduce the above copyright
12 * notice, this list of conditions and the following disclaimer in the
13 * documentation and/or other materials provided with the distribution.
14 * 4. Neither the name of the University nor the names of its contributors
15 * may be used to endorse or promote products derived from this software
16 * without specific prior written permission.
17 *
18 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
19 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
20 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
21 * ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
22 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
23 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
24 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
25 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
26 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
27 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
28 * SUCH DAMAGE.
29 *
30 * $FreeBSD$
31 */
32
33 #include "opt_atpic.h"
34 #include "opt_compat.h"
35 #include "opt_kdtrace.h"
36
37 #include <machine/asmacros.h>
38 #include <machine/psl.h>
39 #include <machine/trap.h>
40
41 #include "assym.s"
42
#ifdef KDTRACE_HOOKS
	.bss
	/*
	 * Hook pointer filled in at runtime by the DTrace fbt/sdt module.
	 * Zero (the .bss initial value) means "no DTrace invop handler
	 * registered"; the #BP path in alltraps tests it against 0 before
	 * jumping through it.
	 */
	.globl	dtrace_invop_jump_addr
	.align	8
	.type	dtrace_invop_jump_addr, @object
	.size	dtrace_invop_jump_addr, 8
dtrace_invop_jump_addr:
	.zero	8
	/*
	 * Return address handed to the DTrace handler: filled in with the
	 * address of 'calltrap' on each #BP so the handler can resume normal
	 * trap processing when the breakpoint was not one of its probes.
	 */
	.globl	dtrace_invop_calltrap_addr
	.align	8
	.type	dtrace_invop_calltrap_addr, @object
	.size	dtrace_invop_calltrap_addr, 8
dtrace_invop_calltrap_addr:
	.zero	8
#endif
58 .text
59
60 /*****************************************************************************/
61 /* Trap handling */
62 /*****************************************************************************/
63 /*
64 * Trap and fault vector routines.
65 *
66 * All traps are 'interrupt gates', SDT_SYSIGT. An interrupt gate pushes
67 * state on the stack but also disables interrupts. This is important for
68 * us for the use of the swapgs instruction. We cannot be interrupted
69 * until the GS.base value is correct. For most traps, we automatically
70 * then enable interrupts if the interrupted context had them enabled.
71 * This is equivalent to the i386 port's use of SDT_SYS386TGT.
72 *
73 * The cpu will push a certain amount of state onto the kernel stack for
74 * the current process. See amd64/include/frame.h.
75 * This includes the current RFLAGS (status register, which includes
76 * the interrupt disable state prior to the trap), the code segment register,
77 * and the return instruction pointer are pushed by the cpu. The cpu
78 * will also push an 'error' code for certain traps. We push a dummy
79 * error code for those traps where the cpu doesn't in order to maintain
80 * a consistent frame. We also push a contrived 'trap number'.
81 *
82 * The cpu does not push the general registers, we must do that, and we
83 * must restore them prior to calling 'iret'. The cpu adjusts the %cs and
84 * %ss segment registers, but does not mess with %ds, %es, or %fs. Thus we
85 * must load them with appropriate values for supervisor mode operation.
86 */
87
MCOUNT_LABEL(user)
MCOUNT_LABEL(btrap)

/*
 * Traps that we leave interrupts disabled for: debug (#DB) and
 * breakpoint (#BP).  These jump to alltraps_noen, which does not
 * execute 'sti' even if the interrupted context had PSL_I set.
 *
 * The cpu pushed ss/rsp/rflags/cs/rip but no error code; 'subq
 * $TF_RIP,%rsp' reserves the rest of the trapframe below that, and we
 * store a zero tf_err/tf_addr plus the contrived trap number by hand
 * to keep the frame layout consistent.
 */
#define	TRAP_NOEN(a)	\
	subq $TF_RIP,%rsp; \
	movq $(a),TF_TRAPNO(%rsp) ; \
	movq $0,TF_ADDR(%rsp) ; \
	movq $0,TF_ERR(%rsp) ; \
	jmp alltraps_noen
IDTVEC(dbg)
	TRAP_NOEN(T_TRCTRAP)
IDTVEC(bpt)
	TRAP_NOEN(T_BPTFLT)
102
/*
 * Regular traps; the cpu does not supply tf_err for these.  As with
 * TRAP_NOEN we extend the hardware frame to a full trapframe and push
 * dummy tf_err/tf_addr values, but these vectors go through alltraps,
 * which re-enables interrupts if the interrupted context had them on.
 */
#define	TRAP(a)	\
	subq $TF_RIP,%rsp; \
	movq $(a),TF_TRAPNO(%rsp) ; \
	movq $0,TF_ADDR(%rsp) ; \
	movq $0,TF_ERR(%rsp) ; \
	jmp alltraps
IDTVEC(div)
	TRAP(T_DIVIDE)
IDTVEC(ofl)
	TRAP(T_OFLOW)
IDTVEC(bnd)
	TRAP(T_BOUND)
IDTVEC(ill)
	TRAP(T_PRIVINFLT)
IDTVEC(dna)
	TRAP(T_DNA)
IDTVEC(fpusegm)
	TRAP(T_FPOPFLT)
IDTVEC(mchk)
	TRAP(T_MCHK)
IDTVEC(rsvd)
	TRAP(T_RESERVED)
IDTVEC(fpu)
	TRAP(T_ARITHTRAP)
IDTVEC(xmm)
	TRAP(T_XMMFLT)
130
/*
 * This group of traps have tf_err already pushed by the cpu, so the
 * stack adjustment is only down to TF_ERR rather than TF_RIP and no
 * dummy error code is stored.
 */
#define	TRAP_ERR(a)	\
	subq $TF_ERR,%rsp; \
	movq $(a),TF_TRAPNO(%rsp) ; \
	movq $0,TF_ADDR(%rsp) ; \
	jmp alltraps
IDTVEC(tss)
	TRAP_ERR(T_TSSFLT)
IDTVEC(missing)
	TRAP_ERR(T_SEGNPFLT)
IDTVEC(stk)
	TRAP_ERR(T_STKFLT)
IDTVEC(align)
	TRAP_ERR(T_ALIGNFLT)
145
146 /*
147 * alltraps entry point. Use swapgs if this is the first time in the
148 * kernel from userland. Reenable interrupts if they were enabled
149 * before the trap. This approximates SDT_SYS386TGT on the i386 port.
150 */
151
	SUPERALIGN_TEXT
	.globl	alltraps
	.type	alltraps,@function
alltraps:
	/*
	 * If CS.RPL is non-zero we came from userland and must swapgs to
	 * get the kernel GS.base; interrupts are still off here (interrupt
	 * gate), so nothing can observe the wrong GS.base.
	 */
	testb $SEL_RPL_MASK,TF_CS(%rsp) /* Did we come from kernel? */
	jz alltraps_testi /* already running with kernel GS.base */
	swapgs
alltraps_testi:
	/* Re-enable interrupts only if the interrupted context had them on. */
	testl $PSL_I,TF_RFLAGS(%rsp)
	jz alltraps_pushregs
	sti
alltraps_pushregs:
	movq %rdi,TF_RDI(%rsp)
alltraps_pushregs_no_rdi:
	/* Save the remaining general registers into the trapframe. */
	movq %rsi,TF_RSI(%rsp)
	movq %rdx,TF_RDX(%rsp)
	movq %rcx,TF_RCX(%rsp)
	movq %r8,TF_R8(%rsp)
	movq %r9,TF_R9(%rsp)
	movq %rax,TF_RAX(%rsp)
	movq %rbx,TF_RBX(%rsp)
	movq %rbp,TF_RBP(%rsp)
	movq %r10,TF_R10(%rsp)
	movq %r11,TF_R11(%rsp)
	movq %r12,TF_R12(%rsp)
	movq %r13,TF_R13(%rsp)
	movq %r14,TF_R14(%rsp)
	movq %r15,TF_R15(%rsp)
	FAKE_MCOUNT(TF_RIP(%rsp))
#ifdef KDTRACE_HOOKS
	/*
	 * DTrace Function Boundary Trace (fbt) and Statically Defined
	 * Trace (sdt) probes are triggered by int3 (0xcc) which causes
	 * the #BP (T_BPTFLT) breakpoint interrupt.  For all other trap
	 * types, just handle them in the usual way.
	 */
	cmpq $T_BPTFLT,TF_TRAPNO(%rsp)
	jne calltrap

	/* Check if there is no DTrace hook registered. */
	cmpq $0,dtrace_invop_jump_addr
	je calltrap

	/*
	 * Set our jump address for the jump back in the event that
	 * the breakpoint wasn't caused by DTrace at all.
	 */
	movq $calltrap, dtrace_invop_calltrap_addr(%rip)

	/*
	 * Jump to the code hooked in by DTrace.
	 * NOTE(review): the load into %rax below looks redundant — the
	 * jmpq re-reads the same memory location and %rax was already
	 * saved in TF_RAX above — but it is harmless; confirm against
	 * the DTrace handler's expectations before removing.
	 */
	movq dtrace_invop_jump_addr, %rax
	jmpq *dtrace_invop_jump_addr
#endif
	.globl	calltrap
	.type	calltrap,@function
calltrap:
	/* Ordinary trap processing: call trap(frame) then return via doreti. */
	movq %rsp, %rdi
	call trap
	MEXITCOUNT
	jmp doreti /* Handle any pending ASTs */
212
213 /*
214 * alltraps_noen entry point. Unlike alltraps above, we want to
215 * leave the interrupts disabled. This corresponds to
216 * SDT_SYS386IGT on the i386 port.
217 */
	SUPERALIGN_TEXT
	.globl	alltraps_noen
	.type	alltraps_noen,@function
alltraps_noen:
	/*
	 * Same GS.base fixup as alltraps, but interrupts are deliberately
	 * left disabled (no 'sti'): used by the #DB/#BP vectors.
	 */
	testb $SEL_RPL_MASK,TF_CS(%rsp) /* Did we come from kernel? */
	jz alltraps_pushregs /* already running with kernel GS.base */
	swapgs
	jmp alltraps_pushregs
226
/*
 * Double fault (#DF) handler.  The cpu pushed an error code, so only
 * TF_ERR worth of extra frame space is reserved; we overwrite tf_err
 * with 0 and build a full trapframe, then hand it to dblfault_handler.
 * A double fault is not recoverable: the handler is not expected to
 * return, and if it does we halt forever.  Interrupts stay disabled
 * throughout.
 */
IDTVEC(dblfault)
	subq $TF_ERR,%rsp
	movq $T_DOUBLEFLT,TF_TRAPNO(%rsp)
	movq $0,TF_ADDR(%rsp)
	movq $0,TF_ERR(%rsp)
	movq %rdi,TF_RDI(%rsp)
	movq %rsi,TF_RSI(%rsp)
	movq %rdx,TF_RDX(%rsp)
	movq %rcx,TF_RCX(%rsp)
	movq %r8,TF_R8(%rsp)
	movq %r9,TF_R9(%rsp)
	movq %rax,TF_RAX(%rsp)
	movq %rbx,TF_RBX(%rsp)
	movq %rbp,TF_RBP(%rsp)
	movq %r10,TF_R10(%rsp)
	movq %r11,TF_R11(%rsp)
	movq %r12,TF_R12(%rsp)
	movq %r13,TF_R13(%rsp)
	movq %r14,TF_R14(%rsp)
	movq %r15,TF_R15(%rsp)
	testb $SEL_RPL_MASK,TF_CS(%rsp) /* Did we come from kernel? */
	jz 1f /* already running with kernel GS.base */
	swapgs
1:	movq %rsp, %rdi
	call dblfault_handler
2:	hlt		/* spin forever; #DF is fatal */
	jmp 2b
254
/*
 * Page fault (#PF) handler.  The cpu pushed an error code.  The fault
 * address in %cr2 must be captured into tf_addr *before* interrupts
 * are re-enabled, since a nested fault would overwrite %cr2; %rdi is
 * saved first to give us a scratch register for that.
 */
IDTVEC(page)
	subq $TF_ERR,%rsp
	movq $T_PAGEFLT,TF_TRAPNO(%rsp)
	testb $SEL_RPL_MASK,TF_CS(%rsp) /* Did we come from kernel? */
	jz 1f /* already running with kernel GS.base */
	swapgs
1:
	movq %rdi,TF_RDI(%rsp) /* free up a GP register */
	movq %cr2,%rdi /* preserve %cr2 before .. */
	movq %rdi,TF_ADDR(%rsp) /* enabling interrupts. */
	testl $PSL_I,TF_RFLAGS(%rsp)
	jz alltraps_pushregs_no_rdi
	sti
	jmp alltraps_pushregs_no_rdi
269
/*
 * We have to special-case this one.  If we get a trap in doreti() at
 * the iretq stage, we'll reenter with the wrong gs state.  We'll have
 * to do the swapgs in this case even coming from the kernel.
 * XXX linux has a trap handler for their equivalent of load_gs().
 */
/*
 * General protection fault (#GP).  Special case: if the faulting %rip
 * is doreti_iret (the kernel's own iretq), we are in the kernel but
 * still carrying the *user* GS.base, so swapgs is needed even though
 * CS says kernel mode.
 */
IDTVEC(prot)
	subq $TF_ERR,%rsp
	movq $T_PROTFLT,TF_TRAPNO(%rsp)
	movq $0,TF_ADDR(%rsp)
	movq %rdi,TF_RDI(%rsp) /* free up a GP register */
	leaq doreti_iret(%rip),%rdi
	cmpq %rdi,TF_RIP(%rsp)
	je 2f /* kernel but with user gsbase!! */
	testb $SEL_RPL_MASK,TF_CS(%rsp) /* Did we come from kernel? */
	jz 1f /* already running with kernel GS.base */
2:
	swapgs
1:
	/* Re-enable interrupts if the interrupted context had them on. */
	testl $PSL_I,TF_RFLAGS(%rsp)
	jz alltraps_pushregs_no_rdi
	sti
	jmp alltraps_pushregs_no_rdi
293
/*
 * Fast syscall entry point.  We enter here with just our new %cs/%ss set,
 * and the new privilege level.  We are still running on the old user stack
 * pointer.  We have to juggle a few things around to find our stack etc.
 * swapgs gives us access to our PCPU space only.
 */
/*
 * 64-bit 'syscall' entry.  On entry: interrupts are off, we are still
 * on the user stack, %rcx holds the user return %rip and %r11 the user
 * %rflags (both saved by the syscall instruction itself), and the
 * caller has moved arg4 from %rcx to %r10 per the syscall convention.
 */
IDTVEC(fast_syscall)
	swapgs
	/* Stash the user %rsp and switch to this CPU's kernel stack. */
	movq %rsp,PCPU(SCRATCH_RSP)
	movq PCPU(RSP0),%rsp
	/* Now emulate a trapframe. Make the 8 byte alignment odd for call. */
	subq $TF_SIZE,%rsp
	/* defer TF_RSP till we have a spare register */
	movq %r11,TF_RFLAGS(%rsp)
	movq %rcx,TF_RIP(%rsp) /* %rcx original value is in %r10 */
	movq PCPU(SCRATCH_RSP),%r11 /* %r11 already saved */
	movq %r11,TF_RSP(%rsp) /* user stack pointer */
	/* Safe to take interrupts again: we are on the kernel stack. */
	sti
	movq $KUDSEL,TF_SS(%rsp)
	movq $KUCSEL,TF_CS(%rsp)
	movq $2,TF_ERR(%rsp)
	movq %rdi,TF_RDI(%rsp) /* arg 1 */
	movq %rsi,TF_RSI(%rsp) /* arg 2 */
	movq %rdx,TF_RDX(%rsp) /* arg 3 */
	movq %r10,TF_RCX(%rsp) /* arg 4 */
	movq %r8,TF_R8(%rsp) /* arg 5 */
	movq %r9,TF_R9(%rsp) /* arg 6 */
	movq %rax,TF_RAX(%rsp) /* syscall number */
	movq %rbx,TF_RBX(%rsp) /* C preserved */
	movq %rbp,TF_RBP(%rsp) /* C preserved */
	movq %r12,TF_R12(%rsp) /* C preserved */
	movq %r13,TF_R13(%rsp) /* C preserved */
	movq %r14,TF_R14(%rsp) /* C preserved */
	movq %r15,TF_R15(%rsp) /* C preserved */
	FAKE_MCOUNT(TF_RIP(%rsp))
	movq %rsp, %rdi
	call syscall
	/* If the syscall asked for a full context restore, go via doreti. */
	movq PCPU(CURPCB),%rax
	testq $PCB_FULLCTX,PCB_FLAGS(%rax)
	jne 3f
1:	/* Check for and handle AST's on return to userland */
	cli
	movq PCPU(CURTHREAD),%rax
	testl $TDF_ASTPENDING | TDF_NEEDRESCHED,TD_FLAGS(%rax)
	je 2f
	sti
	movq %rsp, %rdi
	call ast
	jmp 1b
2:	/* restore preserved registers */
	MEXITCOUNT
	movq TF_RDI(%rsp),%rdi /* bonus; preserve arg 1 */
	movq TF_RSI(%rsp),%rsi /* bonus: preserve arg 2 */
	movq TF_RDX(%rsp),%rdx /* return value 2 */
	movq TF_RAX(%rsp),%rax /* return value 1 */
	movq TF_RBX(%rsp),%rbx /* C preserved */
	movq TF_RBP(%rsp),%rbp /* C preserved */
	movq TF_R12(%rsp),%r12 /* C preserved */
	movq TF_R13(%rsp),%r13 /* C preserved */
	movq TF_R14(%rsp),%r14 /* C preserved */
	movq TF_R15(%rsp),%r15 /* C preserved */
	movq TF_RFLAGS(%rsp),%r11 /* original %rflags */
	movq TF_RIP(%rsp),%rcx /* original %rip */
	movq TF_RSP(%rsp),%r9 /* user stack pointer */
	movq %r9,%rsp /* original %rsp */
	swapgs
	sysretq
3:	/* Requested full context restore, use doreti for that */
	andq $~PCB_FULLCTX,PCB_FLAGS(%rax)
	MEXITCOUNT
	jmp doreti
365
/*
 * Here for CYA insurance, in case a "syscall" instruction gets
 * issued from 32 bit compatibility mode.  MSR_CSTAR has to point
 * to *something* if EFER_SCE is enabled.
 */
/*
 * Stub target for MSR_CSTAR: a 'syscall' issued from 32-bit
 * compatibility mode simply returns immediately; it is not a
 * supported system call path.
 */
IDTVEC(fast_syscall32)
	sysret
373
374 /*
375 * NMI handling is special.
376 *
377 * First, NMIs do not respect the state of the processor's RFLAGS.IF
378 * bit and the NMI handler may be invoked at any time, including when
379 * the processor is in a critical section with RFLAGS.IF == 0. In
380 * particular, this means that the processor's GS.base values could be
381 * inconsistent on entry to the handler, and so we need to read
382 * MSR_GSBASE to determine if a 'swapgs' is needed. We use '%ebx', a
383 * C-preserved register, to remember whether to swap GS back on the
384 * exit path.
385 *
386 * Second, the processor treats NMIs specially, blocking further NMIs
387 * until an 'iretq' instruction is executed. We therefore need to
388 * execute the NMI handler with interrupts disabled to prevent a
389 * nested interrupt from executing an 'iretq' instruction and
390 * inadvertently taking the processor out of NMI mode.
391 */
392
/*
 * NMI entry.  Builds a full trapframe with interrupts kept disabled
 * (see the block comment above).  Because an NMI can land with either
 * user or kernel GS.base active regardless of CS, we read MSR_GSBASE
 * and only swapgs when it does not hold a kernel address; %ebx
 * (C-preserved across the call to trap()) remembers whether to swap
 * back on exit.
 */
IDTVEC(nmi)
	subq $TF_RIP,%rsp
	movq $(T_NMI),TF_TRAPNO(%rsp)
	movq $0,TF_ADDR(%rsp)
	movq $0,TF_ERR(%rsp)
	movq %rdi,TF_RDI(%rsp)
	movq %rsi,TF_RSI(%rsp)
	movq %rdx,TF_RDX(%rsp)
	movq %rcx,TF_RCX(%rsp)
	movq %r8,TF_R8(%rsp)
	movq %r9,TF_R9(%rsp)
	movq %rax,TF_RAX(%rsp)
	movq %rbx,TF_RBX(%rsp)
	movq %rbp,TF_RBP(%rsp)
	movq %r10,TF_R10(%rsp)
	movq %r11,TF_R11(%rsp)
	movq %r12,TF_R12(%rsp)
	movq %r13,TF_R13(%rsp)
	movq %r14,TF_R14(%rsp)
	movq %r15,TF_R15(%rsp)
	xorl %ebx,%ebx		/* %ebx = 0: assume no swapgs needed */
	testb $SEL_RPL_MASK,TF_CS(%rsp)
	jnz nmi_needswapgs /* we came from userland */
	movl $MSR_GSBASE,%ecx
	rdmsr			/* %edx = high 32 bits of GS.base */
	cmpl $VM_MAXUSER_ADDRESS >> 32,%edx
	jae nmi_calltrap /* GS.base holds a kernel VA */
nmi_needswapgs:
	incl %ebx		/* remember to swap back on exit */
	swapgs
/* Note: this label is also used by ddb and gdb: */
nmi_calltrap:
	FAKE_MCOUNT(TF_RIP(%rsp))
	movq %rsp, %rdi
	call trap
	MEXITCOUNT
	/* Undo the entry swapgs, if one was done. */
	testl %ebx,%ebx
	jz nmi_restoreregs
	swapgs
nmi_restoreregs:
	movq TF_RDI(%rsp),%rdi
	movq TF_RSI(%rsp),%rsi
	movq TF_RDX(%rsp),%rdx
	movq TF_RCX(%rsp),%rcx
	movq TF_R8(%rsp),%r8
	movq TF_R9(%rsp),%r9
	movq TF_RAX(%rsp),%rax
	movq TF_RBX(%rsp),%rbx
	movq TF_RBP(%rsp),%rbp
	movq TF_R10(%rsp),%r10
	movq TF_R11(%rsp),%r11
	movq TF_R12(%rsp),%r12
	movq TF_R13(%rsp),%r13
	movq TF_R14(%rsp),%r14
	movq TF_R15(%rsp),%r15
	addq $TF_RIP,%rsp
	iretq			/* re-enables further NMI delivery */
450
/*
 * First-time entry point for a newly forked thread.  cpu_fork() left
 * the start function in %r12 and its argument in %rbx; fork_exit()
 * invokes them and finishes scheduler bookkeeping, then we return to
 * usermode via doreti using the trapframe on the stack.
 */
ENTRY(fork_trampoline)
	movq %r12, %rdi /* function */
	movq %rbx, %rsi /* arg1 */
	movq %rsp, %rdx /* trapframe pointer */
	call fork_exit
	MEXITCOUNT
	jmp doreti /* Handle any ASTs */
458
/*
 * To efficiently implement classification of trap and interrupt handlers
 * for profiling, there must be only trap handlers between the labels btrap
 * and bintr, and only interrupt handlers between the labels bintr and
 * eintr.  This is implemented (partly) by including files that contain
 * some of the handlers.  Before including the files, set up a normal asm
 * environment so that the included files don't need to know that they are
 * included.
 */
468
/*
 * Pull in the interrupt-vector handlers between the MCOUNT profiling
 * labels bintr/eintr (see the comment above).  The .data/.p2align
 * pairs keep data emitted by the included files aligned.
 */
#ifdef COMPAT_IA32
	.data
	.p2align 4
	.text
	SUPERALIGN_TEXT

#include <amd64/ia32/ia32_exception.S>
#endif

	.data
	.p2align 4
	.text
	SUPERALIGN_TEXT
MCOUNT_LABEL(bintr)

#include <amd64/amd64/apic_vector.S>

#ifdef DEV_ATPIC
	.data
	.p2align 4
	.text
	SUPERALIGN_TEXT

#include <amd64/isa/atpic_vector.S>
#endif

	.text
MCOUNT_LABEL(eintr)
497
498 /*
499 * void doreti(struct trapframe)
500 *
501 * Handle return from interrupts, traps and syscalls.
502 */
	.text
	SUPERALIGN_TEXT
	.type	doreti,@function
doreti:
	FAKE_MCOUNT($bintr) /* init "from" bintr -> doreti */
	/*
	 * Check if ASTs can be handled now.
	 */
	testb $SEL_RPL_MASK,TF_CS(%rsp) /* are we returning to user mode? */
	jz doreti_exit /* can't handle ASTs now if not */

doreti_ast:
	/*
	 * Check for ASTs atomically with returning.  Disabling CPU
	 * interrupts provides sufficient locking even in the SMP case,
	 * since we will be informed of any new ASTs by an IPI.
	 */
	cli
	movq PCPU(CURTHREAD),%rax
	testl $TDF_ASTPENDING | TDF_NEEDRESCHED,TD_FLAGS(%rax)
	je doreti_exit
	sti
	movq %rsp, %rdi /* pass a pointer to the trapframe */
	call ast
	jmp doreti_ast

	/*
	 * doreti_exit: pop registers, iret.
	 *
	 * The segment register pop is a special case, since it may
	 * fault if (for example) a sigreturn specifies bad segment
	 * registers.  The fault is handled in trap.c.
	 */
doreti_exit:
	MEXITCOUNT
	movq TF_RDI(%rsp),%rdi
	movq TF_RSI(%rsp),%rsi
	movq TF_RDX(%rsp),%rdx
	movq TF_RCX(%rsp),%rcx
	movq TF_R8(%rsp),%r8
	movq TF_R9(%rsp),%r9
	movq TF_RAX(%rsp),%rax
	movq TF_RBX(%rsp),%rbx
	movq TF_RBP(%rsp),%rbp
	movq TF_R10(%rsp),%r10
	movq TF_R11(%rsp),%r11
	movq TF_R12(%rsp),%r12
	movq TF_R13(%rsp),%r13
	movq TF_R14(%rsp),%r14
	movq TF_R15(%rsp),%r15
	testb $SEL_RPL_MASK,TF_CS(%rsp) /* Did we come from kernel? */
	jz 1f /* keep running with kernel GS.base */
	cli		/* no interrupts between swapgs and iretq */
	swapgs
1:	addq $TF_RIP,%rsp /* skip over tf_err, tf_trapno */
	/*
	 * doreti_iret is a global label so the #GP handler (IDTVEC(prot))
	 * can recognize a fault taken at exactly this iretq.
	 */
	.globl	doreti_iret
doreti_iret:
	iretq
561
562 /*
563 * doreti_iret_fault. Alternative return code for
564 * the case where we get a fault in the doreti_exit code
565 * above. trap() (amd64/amd64/trap.c) catches this specific
566 * case, sends the process a signal and continues in the
567 * corresponding place in the code below.
568 */
	ALIGN_TEXT
	.globl	doreti_iret_fault
doreti_iret_fault:
	/*
	 * Rebuild a full trapframe from scratch: all general registers
	 * still hold the values restored by doreti_exit, so re-saving
	 * them reconstructs the interrupted context.  The frame is then
	 * presented to trap() as a T_PROTFLT.
	 */
	subq $TF_RIP,%rsp /* space including tf_err, tf_trapno */
	testl $PSL_I,TF_RFLAGS(%rsp)
	jz 1f
	sti
1:	movq %rdi,TF_RDI(%rsp)
	movq %rsi,TF_RSI(%rsp)
	movq %rdx,TF_RDX(%rsp)
	movq %rcx,TF_RCX(%rsp)
	movq %r8,TF_R8(%rsp)
	movq %r9,TF_R9(%rsp)
	movq %rax,TF_RAX(%rsp)
	movq %rbx,TF_RBX(%rsp)
	movq %rbp,TF_RBP(%rsp)
	movq %r10,TF_R10(%rsp)
	movq %r11,TF_R11(%rsp)
	movq %r12,TF_R12(%rsp)
	movq %r13,TF_R13(%rsp)
	movq %r14,TF_R14(%rsp)
	movq %r15,TF_R15(%rsp)
	movq $T_PROTFLT,TF_TRAPNO(%rsp)
	movq $0,TF_ERR(%rsp) /* XXX should be the error code */
	movq $0,TF_ADDR(%rsp)
	FAKE_MCOUNT(TF_RIP(%rsp))
	jmp calltrap